author     Dimitry Andric <dim@FreeBSD.org>    2021-02-16 20:13:02 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2021-02-16 20:13:02 +0000
commit     b60736ec1405bb0a8dd40989f67ef4c93da068ab (patch)
tree       5c43fbb7c9fc45f0f87e0e6795a86267dbd12f9d /clang/lib
parent     cfca06d7963fa0909f90483b42a6d7d194d01e08 (diff)
Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/APINotes/APINotesFormat.h | 255
-rw-r--r--  clang/lib/APINotes/APINotesTypes.cpp | 107
-rw-r--r--  clang/lib/APINotes/APINotesYAMLCompiler.cpp | 598
-rw-r--r--  clang/lib/ARCMigrate/ARCMT.cpp | 8
-rw-r--r--  clang/lib/ARCMigrate/FileRemapper.cpp | 15
-rw-r--r--  clang/lib/ARCMigrate/ObjCMT.cpp | 39
-rw-r--r--  clang/lib/ARCMigrate/TransGCAttrs.cpp | 22
-rw-r--r--  clang/lib/ARCMigrate/TransProperties.cpp | 17
-rw-r--r--  clang/lib/ARCMigrate/Transforms.h | 6
-rw-r--r--  clang/lib/AST/APValue.cpp | 538
-rw-r--r--  clang/lib/AST/ASTContext.cpp | 585
-rw-r--r--  clang/lib/AST/ASTDiagnostic.cpp | 15
-rw-r--r--  clang/lib/AST/ASTDumper.cpp | 8
-rw-r--r--  clang/lib/AST/ASTImporter.cpp | 564
-rw-r--r--  clang/lib/AST/ASTImporterLookupTable.cpp | 14
-rw-r--r--  clang/lib/AST/ASTStructuralEquivalence.cpp | 496
-rw-r--r--  clang/lib/AST/ASTTypeTraits.cpp | 58
-rw-r--r--  clang/lib/AST/AttrImpl.cpp | 53
-rw-r--r--  clang/lib/AST/CXXInheritance.cpp | 150
-rw-r--r--  clang/lib/AST/CommentLexer.cpp | 9
-rw-r--r--  clang/lib/AST/ComparisonCategories.cpp | 2
-rw-r--r--  clang/lib/AST/ComputeDependence.cpp | 138
-rw-r--r--  clang/lib/AST/Decl.cpp | 357
-rw-r--r--  clang/lib/AST/DeclBase.cpp | 65
-rw-r--r--  clang/lib/AST/DeclCXX.cpp | 131
-rw-r--r--  clang/lib/AST/DeclObjC.cpp | 45
-rw-r--r--  clang/lib/AST/DeclOpenMP.cpp | 112
-rw-r--r--  clang/lib/AST/DeclPrinter.cpp | 28
-rw-r--r--  clang/lib/AST/DeclTemplate.cpp | 77
-rw-r--r--  clang/lib/AST/Expr.cpp | 482
-rw-r--r--  clang/lib/AST/ExprCXX.cpp | 207
-rw-r--r--  clang/lib/AST/ExprClassification.cpp | 4
-rw-r--r--  clang/lib/AST/ExprConstant.cpp | 1547
-rw-r--r--  clang/lib/AST/Interp/Disasm.cpp | 4
-rw-r--r--  clang/lib/AST/Interp/State.cpp | 2
-rw-r--r--  clang/lib/AST/ItaniumMangle.cpp | 1048
-rw-r--r--  clang/lib/AST/JSONNodeDumper.cpp | 8
-rw-r--r--  clang/lib/AST/Linkage.h | 2
-rw-r--r--  clang/lib/AST/Mangle.cpp | 72
-rw-r--r--  clang/lib/AST/MicrosoftMangle.cpp | 424
-rw-r--r--  clang/lib/AST/NSAPI.cpp | 3
-rw-r--r--  clang/lib/AST/OpenMPClause.cpp | 178
-rw-r--r--  clang/lib/AST/ParentMapContext.cpp | 117
-rw-r--r--  clang/lib/AST/PrintfFormatString.cpp | 3
-rw-r--r--  clang/lib/AST/RecordLayout.cpp | 41
-rw-r--r--  clang/lib/AST/RecordLayoutBuilder.cpp | 380
-rw-r--r--  clang/lib/AST/Stmt.cpp | 110
-rw-r--r--  clang/lib/AST/StmtOpenMP.cpp | 1135
-rw-r--r--  clang/lib/AST/StmtPrinter.cpp | 25
-rw-r--r--  clang/lib/AST/StmtProfile.cpp | 9
-rw-r--r--  clang/lib/AST/TemplateBase.cpp | 35
-rw-r--r--  clang/lib/AST/TemplateName.cpp | 18
-rw-r--r--  clang/lib/AST/TextNodeDumper.cpp | 51
-rw-r--r--  clang/lib/AST/Type.cpp | 113
-rw-r--r--  clang/lib/AST/TypeLoc.cpp | 5
-rw-r--r--  clang/lib/AST/TypePrinter.cpp | 314
-rw-r--r--  clang/lib/ASTMatchers/ASTMatchFinder.cpp | 447
-rw-r--r--  clang/lib/ASTMatchers/ASTMatchersInternal.cpp | 152
-rw-r--r--  clang/lib/ASTMatchers/Dynamic/Marshallers.cpp | 11
-rw-r--r--  clang/lib/ASTMatchers/Dynamic/Marshallers.h | 208
-rw-r--r--  clang/lib/ASTMatchers/Dynamic/Registry.cpp | 11
-rw-r--r--  clang/lib/ASTMatchers/Dynamic/VariantValue.cpp | 30
-rw-r--r--  clang/lib/ASTMatchers/GtestMatchers.cpp | 4
-rw-r--r--  clang/lib/Analysis/BodyFarm.cpp | 40
-rw-r--r--  clang/lib/Analysis/CFG.cpp | 35
-rw-r--r--  clang/lib/Analysis/CalledOnceCheck.cpp | 1525
-rw-r--r--  clang/lib/Analysis/ExprMutationAnalyzer.cpp | 300
-rw-r--r--  clang/lib/Analysis/IssueHash.cpp (renamed from clang/lib/StaticAnalyzer/Core/IssueHash.cpp) | 41
-rw-r--r--  clang/lib/Analysis/LiveVariables.cpp | 102
-rw-r--r--  clang/lib/Analysis/PathDiagnostic.cpp | 46
-rw-r--r--  clang/lib/Analysis/ThreadSafety.cpp | 29
-rw-r--r--  clang/lib/Analysis/ThreadSafetyCommon.cpp | 4
-rw-r--r--  clang/lib/Basic/Cuda.cpp | 43
-rw-r--r--  clang/lib/Basic/Diagnostic.cpp | 35
-rw-r--r--  clang/lib/Basic/DiagnosticIDs.cpp | 109
-rw-r--r--  clang/lib/Basic/FileEntry.cpp | 24
-rw-r--r--  clang/lib/Basic/FileManager.cpp | 181
-rw-r--r--  clang/lib/Basic/FixedPoint.cpp | 394
-rw-r--r--  clang/lib/Basic/IdentifierTable.cpp | 5
-rw-r--r--  clang/lib/Basic/LangOptions.cpp | 2
-rw-r--r--  clang/lib/Basic/Module.cpp | 21
-rw-r--r--  clang/lib/Basic/OpenCLOptions.cpp | 106
-rw-r--r--  clang/lib/Basic/OpenMPKinds.cpp | 50
-rw-r--r--  clang/lib/Basic/ProfileList.cpp | 113
-rw-r--r--  clang/lib/Basic/SourceLocation.cpp | 21
-rw-r--r--  clang/lib/Basic/SourceManager.cpp | 506
-rw-r--r--  clang/lib/Basic/TargetID.cpp | 169
-rw-r--r--  clang/lib/Basic/TargetInfo.cpp | 1
-rw-r--r--  clang/lib/Basic/Targets.cpp | 68
-rw-r--r--  clang/lib/Basic/Targets/AArch64.cpp | 48
-rw-r--r--  clang/lib/Basic/Targets/AArch64.h | 6
-rw-r--r--  clang/lib/Basic/Targets/AMDGPU.cpp | 36
-rw-r--r--  clang/lib/Basic/Targets/AMDGPU.h | 77
-rw-r--r--  clang/lib/Basic/Targets/ARM.cpp | 2
-rw-r--r--  clang/lib/Basic/Targets/AVR.cpp | 7
-rw-r--r--  clang/lib/Basic/Targets/Hexagon.cpp | 3
-rw-r--r--  clang/lib/Basic/Targets/MSP430.cpp | 1
-rw-r--r--  clang/lib/Basic/Targets/Mips.cpp | 1
-rw-r--r--  clang/lib/Basic/Targets/NVPTX.cpp | 8
-rw-r--r--  clang/lib/Basic/Targets/NVPTX.h | 22
-rw-r--r--  clang/lib/Basic/Targets/OSTargets.h | 81
-rw-r--r--  clang/lib/Basic/Targets/PPC.cpp | 87
-rw-r--r--  clang/lib/Basic/Targets/PPC.h | 34
-rw-r--r--  clang/lib/Basic/Targets/RISCV.cpp | 160
-rw-r--r--  clang/lib/Basic/Targets/RISCV.h | 49
-rw-r--r--  clang/lib/Basic/Targets/SPIR.h | 6
-rw-r--r--  clang/lib/Basic/Targets/Sparc.cpp | 28
-rw-r--r--  clang/lib/Basic/Targets/Sparc.h | 11
-rw-r--r--  clang/lib/Basic/Targets/SystemZ.h | 4
-rw-r--r--  clang/lib/Basic/Targets/TCE.h | 2
-rw-r--r--  clang/lib/Basic/Targets/VE.h | 16
-rw-r--r--  clang/lib/Basic/Targets/WebAssembly.cpp | 60
-rw-r--r--  clang/lib/Basic/Targets/WebAssembly.h | 6
-rw-r--r--  clang/lib/Basic/Targets/X86.cpp | 59
-rw-r--r--  clang/lib/Basic/Targets/X86.h | 36
-rw-r--r--  clang/lib/Basic/Version.cpp | 2
-rw-r--r--  clang/lib/Basic/Warnings.cpp | 7
-rw-r--r--  clang/lib/CodeGen/ABIInfo.h | 2
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp | 560
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp | 60
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp | 17
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp | 2363
-rw-r--r--  clang/lib/CodeGen/CGCUDANV.cpp | 109
-rw-r--r--  clang/lib/CodeGen/CGCUDARuntime.h | 7
-rw-r--r--  clang/lib/CodeGen/CGCXXABI.cpp | 26
-rw-r--r--  clang/lib/CodeGen/CGCXXABI.h | 13
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp | 339
-rw-r--r--  clang/lib/CodeGen/CGCall.h | 6
-rw-r--r--  clang/lib/CodeGen/CGClass.cpp | 103
-rw-r--r--  clang/lib/CodeGen/CGCleanup.h | 1
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp | 312
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.h | 14
-rw-r--r--  clang/lib/CodeGen/CGDecl.cpp | 158
-rw-r--r--  clang/lib/CodeGen/CGDeclCXX.cpp | 113
-rw-r--r--  clang/lib/CodeGen/CGException.cpp | 28
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp | 244
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp | 131
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp | 28
-rw-r--r--  clang/lib/CodeGen/CGExprComplex.cpp | 11
-rw-r--r--  clang/lib/CodeGen/CGExprConstant.cpp | 34
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp | 440
-rw-r--r--  clang/lib/CodeGen/CGLoopInfo.cpp | 93
-rw-r--r--  clang/lib/CodeGen/CGLoopInfo.h | 15
-rw-r--r--  clang/lib/CodeGen/CGObjC.cpp | 243
-rw-r--r--  clang/lib/CodeGen/CGObjCGNU.cpp | 67
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp | 98
-rw-r--r--  clang/lib/CodeGen/CGObjCRuntime.cpp | 10
-rw-r--r--  clang/lib/CodeGen/CGObjCRuntime.h | 14
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 2002
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.h | 81
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp | 60
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h | 43
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 4864
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.h | 504
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | 5225
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h | 465
-rw-r--r--  clang/lib/CodeGen/CGRecordLayout.h | 17
-rw-r--r--  clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 187
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp | 301
-rw-r--r--  clang/lib/CodeGen/CGStmtOpenMP.cpp | 205
-rw-r--r--  clang/lib/CodeGen/CGVTT.cpp | 13
-rw-r--r--  clang/lib/CodeGen/CGVTables.cpp | 96
-rw-r--r--  clang/lib/CodeGen/CGValue.h | 2
-rw-r--r--  clang/lib/CodeGen/CodeGenAction.cpp | 220
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.cpp | 315
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h | 257
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 398
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.h | 58
-rw-r--r--  clang/lib/CodeGen/CodeGenPGO.cpp | 55
-rw-r--r--  clang/lib/CodeGen/CodeGenPGO.h | 2
-rw-r--r--  clang/lib/CodeGen/CodeGenTypeCache.h | 3
-rw-r--r--  clang/lib/CodeGen/CodeGenTypes.cpp | 71
-rw-r--r--  clang/lib/CodeGen/CoverageMappingGen.cpp | 350
-rw-r--r--  clang/lib/CodeGen/CoverageMappingGen.h | 50
-rw-r--r--  clang/lib/CodeGen/ItaniumCXXABI.cpp | 183
-rw-r--r--  clang/lib/CodeGen/MicrosoftCXXABI.cpp | 103
-rw-r--r--  clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 10
-rw-r--r--  clang/lib/CodeGen/SwiftCallingConv.cpp | 33
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp | 624
-rw-r--r--  clang/lib/CodeGen/TargetInfo.h | 8
-rw-r--r--  clang/lib/CrossTU/CrossTranslationUnit.cpp | 12
-rw-r--r--  clang/lib/Driver/Compilation.cpp | 2
-rw-r--r--  clang/lib/Driver/Distro.cpp | 180
-rw-r--r--  clang/lib/Driver/Driver.cpp | 347
-rw-r--r--  clang/lib/Driver/Job.cpp | 22
-rw-r--r--  clang/lib/Driver/SanitizerArgs.cpp | 42
-rw-r--r--  clang/lib/Driver/ToolChain.cpp | 139
-rw-r--r--  clang/lib/Driver/ToolChains/AIX.cpp | 62
-rw-r--r--  clang/lib/Driver/ToolChains/AIX.h | 7
-rw-r--r--  clang/lib/Driver/ToolChains/AMDGPU.cpp | 136
-rw-r--r--  clang/lib/Driver/ToolChains/AMDGPU.h | 14
-rw-r--r--  clang/lib/Driver/ToolChains/AVR.cpp | 306
-rw-r--r--  clang/lib/Driver/ToolChains/Ananas.cpp | 10
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/AArch64.cpp | 24
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/ARM.cpp | 261
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/ARM.h | 2
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/Mips.cpp | 2
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/PPC.cpp | 1
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/RISCV.cpp | 125
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/Sparc.cpp | 9
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/VE.cpp | 4
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/VE.h | 2
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/X86.cpp | 35
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/X86.h | 2
-rw-r--r--  clang/lib/Driver/ToolChains/BareMetal.cpp | 146
-rw-r--r--  clang/lib/Driver/ToolChains/BareMetal.h | 15
-rw-r--r--  clang/lib/Driver/ToolChains/Clang.cpp | 681
-rw-r--r--  clang/lib/Driver/ToolChains/CloudABI.cpp | 5
-rw-r--r--  clang/lib/Driver/ToolChains/CommonArgs.cpp | 267
-rw-r--r--  clang/lib/Driver/ToolChains/CommonArgs.h | 14
-rw-r--r--  clang/lib/Driver/ToolChains/CrossWindows.cpp | 15
-rw-r--r--  clang/lib/Driver/ToolChains/CrossWindows.h | 6
-rw-r--r--  clang/lib/Driver/ToolChains/Cuda.cpp | 215
-rw-r--r--  clang/lib/Driver/ToolChains/Cuda.h | 9
-rw-r--r--  clang/lib/Driver/ToolChains/Darwin.cpp | 132
-rw-r--r--  clang/lib/Driver/ToolChains/Darwin.h | 21
-rw-r--r--  clang/lib/Driver/ToolChains/DragonFly.cpp | 16
-rw-r--r--  clang/lib/Driver/ToolChains/Flang.cpp | 44
-rw-r--r--  clang/lib/Driver/ToolChains/Flang.h | 9
-rw-r--r--  clang/lib/Driver/ToolChains/FreeBSD.cpp | 18
-rw-r--r--  clang/lib/Driver/ToolChains/Fuchsia.cpp | 29
-rw-r--r--  clang/lib/Driver/ToolChains/Fuchsia.h | 6
-rw-r--r--  clang/lib/Driver/ToolChains/Gnu.cpp | 155
-rw-r--r--  clang/lib/Driver/ToolChains/Gnu.h | 6
-rw-r--r--  clang/lib/Driver/ToolChains/HIP.cpp | 58
-rw-r--r--  clang/lib/Driver/ToolChains/Hexagon.cpp | 10
-rw-r--r--  clang/lib/Driver/ToolChains/InterfaceStubs.cpp | 2
-rw-r--r--  clang/lib/Driver/ToolChains/Linux.cpp | 36
-rw-r--r--  clang/lib/Driver/ToolChains/MSP430.cpp | 159
-rw-r--r--  clang/lib/Driver/ToolChains/MSP430.h | 13
-rw-r--r--  clang/lib/Driver/ToolChains/MSVC.cpp | 65
-rw-r--r--  clang/lib/Driver/ToolChains/MinGW.cpp | 25
-rw-r--r--  clang/lib/Driver/ToolChains/Minix.cpp | 10
-rw-r--r--  clang/lib/Driver/ToolChains/Myriad.cpp | 10
-rw-r--r--  clang/lib/Driver/ToolChains/NaCl.cpp | 5
-rw-r--r--  clang/lib/Driver/ToolChains/NetBSD.cpp | 10
-rw-r--r--  clang/lib/Driver/ToolChains/OpenBSD.cpp | 77
-rw-r--r--  clang/lib/Driver/ToolChains/OpenBSD.h | 22
-rw-r--r--  clang/lib/Driver/ToolChains/PS4CPU.cpp | 47
-rw-r--r--  clang/lib/Driver/ToolChains/PS4CPU.h | 6
-rw-r--r--  clang/lib/Driver/ToolChains/RISCVToolchain.cpp | 21
-rw-r--r--  clang/lib/Driver/ToolChains/RISCVToolchain.h | 1
-rw-r--r--  clang/lib/Driver/ToolChains/ROCm.h | 13
-rw-r--r--  clang/lib/Driver/ToolChains/Solaris.cpp | 4
-rw-r--r--  clang/lib/Driver/ToolChains/Solaris.h | 5
-rw-r--r--  clang/lib/Driver/ToolChains/VEToolchain.cpp | 31
-rw-r--r--  clang/lib/Driver/ToolChains/VEToolchain.h | 1
-rw-r--r--  clang/lib/Driver/ToolChains/WebAssembly.cpp | 28
-rw-r--r--  clang/lib/Driver/ToolChains/XCore.cpp | 4
-rw-r--r--  clang/lib/Driver/ToolChains/ZOS.cpp | 33
-rw-r--r--  clang/lib/Driver/ToolChains/ZOS.h | 40
-rw-r--r--  clang/lib/Driver/Types.cpp | 6
-rw-r--r--  clang/lib/Driver/XRayArgs.cpp | 26
-rw-r--r--  clang/lib/Edit/EditedSource.cpp | 4
-rw-r--r--  clang/lib/Edit/RewriteObjCFoundationAPI.cpp | 2
-rw-r--r--  clang/lib/Format/BreakableToken.cpp | 92
-rw-r--r--  clang/lib/Format/BreakableToken.h | 1
-rw-r--r--  clang/lib/Format/ContinuationIndenter.cpp | 59
-rw-r--r--  clang/lib/Format/Format.cpp | 167
-rw-r--r--  clang/lib/Format/FormatInternal.h | 1
-rw-r--r--  clang/lib/Format/FormatToken.cpp | 5
-rw-r--r--  clang/lib/Format/FormatToken.h | 281
-rw-r--r--  clang/lib/Format/FormatTokenLexer.cpp | 23
-rw-r--r--  clang/lib/Format/MacroExpander.cpp | 224
-rw-r--r--  clang/lib/Format/Macros.h | 141
-rwxr-xr-x[-rw-r--r--]  clang/lib/Format/TokenAnnotator.cpp | 263
-rw-r--r--  clang/lib/Format/UnwrappedLineFormatter.cpp | 71
-rw-r--r--  clang/lib/Format/UnwrappedLineParser.cpp | 252
-rw-r--r--  clang/lib/Format/UnwrappedLineParser.h | 5
-rw-r--r--  clang/lib/Format/WhitespaceManager.cpp | 90
-rw-r--r--  clang/lib/Frontend/ASTUnit.cpp | 103
-rw-r--r--  clang/lib/Frontend/ChainedIncludesSource.cpp | 11
-rw-r--r--  clang/lib/Frontend/CompilerInstance.cpp | 280
-rw-r--r--  clang/lib/Frontend/CompilerInvocation.cpp | 2178
-rw-r--r--  clang/lib/Frontend/CreateInvocationFromCommandLine.cpp | 4
-rw-r--r--  clang/lib/Frontend/DependencyFile.cpp | 17
-rw-r--r--  clang/lib/Frontend/FrontendAction.cpp | 19
-rw-r--r--  clang/lib/Frontend/FrontendActions.cpp | 58
-rw-r--r--  clang/lib/Frontend/FrontendOptions.cpp | 2
-rw-r--r--  clang/lib/Frontend/FrontendTiming.cpp | 19
-rw-r--r--  clang/lib/Frontend/InitHeaderSearch.cpp | 2
-rw-r--r--  clang/lib/Frontend/InitPreprocessor.cpp | 32
-rw-r--r--  clang/lib/Frontend/ModuleDependencyCollector.cpp | 52
-rw-r--r--  clang/lib/Frontend/PrecompiledPreamble.cpp | 51
-rw-r--r--  clang/lib/Frontend/Rewrite/HTMLPrint.cpp | 2
-rw-r--r--  clang/lib/Frontend/Rewrite/InclusionRewriter.cpp | 78
-rw-r--r--  clang/lib/Frontend/Rewrite/RewriteMacros.cpp | 2
-rw-r--r--  clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp | 57
-rw-r--r--  clang/lib/Frontend/Rewrite/RewriteObjC.cpp | 47
-rw-r--r--  clang/lib/Frontend/TextDiagnostic.cpp | 5
-rw-r--r--  clang/lib/Frontend/TextDiagnosticPrinter.cpp | 8
-rw-r--r--  clang/lib/Frontend/VerifyDiagnosticConsumer.cpp | 12
-rw-r--r--  clang/lib/Headers/__clang_cuda_builtin_vars.h | 9
-rw-r--r--  clang/lib/Headers/__clang_cuda_cmath.h | 50
-rw-r--r--  clang/lib/Headers/__clang_cuda_complex_builtins.h | 36
-rw-r--r--  clang/lib/Headers/__clang_cuda_math.h | 9
-rw-r--r--  clang/lib/Headers/__clang_cuda_math_forward_declares.h | 3
-rw-r--r--  clang/lib/Headers/__clang_cuda_runtime_wrapper.h | 28
-rw-r--r--  clang/lib/Headers/__clang_hip_cmath.h | 664
-rw-r--r--  clang/lib/Headers/__clang_hip_libdevice_declares.h | 26
-rw-r--r--  clang/lib/Headers/__clang_hip_math.h | 1148
-rw-r--r--  clang/lib/Headers/__clang_hip_runtime_wrapper.h | 5
-rw-r--r--  clang/lib/Headers/altivec.h | 1209
-rw-r--r--  clang/lib/Headers/amxintrin.h | 97
-rw-r--r--  clang/lib/Headers/arm_acle.h | 26
-rw-r--r--  clang/lib/Headers/avx512fintrin.h | 157
-rw-r--r--  clang/lib/Headers/avx512vlvnniintrin.h | 205
-rw-r--r--  clang/lib/Headers/avxintrin.h | 6
-rw-r--r--  clang/lib/Headers/avxvnniintrin.h | 225
-rw-r--r--  clang/lib/Headers/cpuid.h | 8
-rw-r--r--  clang/lib/Headers/cuda_wrappers/algorithm | 2
-rw-r--r--  clang/lib/Headers/cuda_wrappers/new | 10
-rw-r--r--  clang/lib/Headers/emmintrin.h | 2
-rw-r--r--  clang/lib/Headers/gfniintrin.h | 181
-rw-r--r--  clang/lib/Headers/hresetintrin.h | 49
-rw-r--r--  clang/lib/Headers/ia32intrin.h | 97
-rw-r--r--  clang/lib/Headers/immintrin.h | 12
-rw-r--r--  clang/lib/Headers/intrin.h | 169
-rw-r--r--  clang/lib/Headers/keylockerintrin.h | 506
-rw-r--r--  clang/lib/Headers/mm_malloc.h | 6
-rw-r--r--  clang/lib/Headers/opencl-c-base.h | 18
-rw-r--r--  clang/lib/Headers/opencl-c.h | 2
-rw-r--r--  clang/lib/Headers/openmp_wrappers/cmath | 5
-rw-r--r--  clang/lib/Headers/openmp_wrappers/complex | 27
-rw-r--r--  clang/lib/Headers/openmp_wrappers/complex.h | 2
-rw-r--r--  clang/lib/Headers/openmp_wrappers/complex_cmath.h | 388
-rw-r--r--  clang/lib/Headers/popcntintrin.h | 11
-rw-r--r--  clang/lib/Headers/ppc_wrappers/smmintrin.h | 24
-rw-r--r--  clang/lib/Headers/uintrintrin.h | 150
-rw-r--r--  clang/lib/Headers/wasm_simd128.h | 112
-rw-r--r--  clang/lib/Headers/x86gprintrin.h | 23
-rw-r--r--  clang/lib/Index/FileIndexRecord.cpp | 2
-rw-r--r--  clang/lib/Index/IndexBody.cpp | 4
-rw-r--r--  clang/lib/Index/IndexTypeSourceInfo.cpp | 21
-rw-r--r--  clang/lib/Index/IndexingAction.cpp | 11
-rw-r--r--  clang/lib/Index/SimpleFormatContext.h | 72
-rw-r--r--  clang/lib/Index/USRGeneration.cpp | 3
-rw-r--r--  clang/lib/IndexSerialization/SerializablePathCollection.cpp | 91
-rw-r--r--  clang/lib/Lex/HeaderSearch.cpp | 58
-rw-r--r--  clang/lib/Lex/Lexer.cpp | 49
-rw-r--r--  clang/lib/Lex/LiteralSupport.cpp | 4
-rw-r--r--  clang/lib/Lex/ModuleMap.cpp | 76
-rw-r--r--  clang/lib/Lex/PPDirectives.cpp | 12
-rw-r--r--  clang/lib/Lex/PPLexerChange.cpp | 27
-rw-r--r--  clang/lib/Lex/PPMacroExpansion.cpp | 31
-rw-r--r--  clang/lib/Lex/Pragma.cpp | 2
-rw-r--r--  clang/lib/Lex/Preprocessor.cpp | 16
-rw-r--r--  clang/lib/Lex/ScratchBuffer.cpp | 8
-rw-r--r--  clang/lib/Lex/TokenLexer.cpp | 10
-rw-r--r--  clang/lib/Parse/ParseCXXInlineMethods.cpp | 23
-rw-r--r--  clang/lib/Parse/ParseDecl.cpp | 300
-rw-r--r--  clang/lib/Parse/ParseDeclCXX.cpp | 107
-rw-r--r--  clang/lib/Parse/ParseExpr.cpp | 96
-rw-r--r--  clang/lib/Parse/ParseExprCXX.cpp | 71
-rw-r--r--  clang/lib/Parse/ParseObjc.cpp | 37
-rw-r--r--  clang/lib/Parse/ParseOpenMP.cpp | 456
-rw-r--r--  clang/lib/Parse/ParsePragma.cpp | 311
-rw-r--r--  clang/lib/Parse/ParseStmt.cpp | 57
-rw-r--r--  clang/lib/Parse/ParseStmtAsm.cpp | 2
-rw-r--r--  clang/lib/Parse/ParseTemplate.cpp | 21
-rw-r--r--  clang/lib/Parse/ParseTentative.cpp | 13
-rw-r--r--  clang/lib/Parse/Parser.cpp | 66
-rw-r--r--  clang/lib/Rewrite/HTMLRewrite.cpp | 22
-rw-r--r--  clang/lib/Rewrite/Rewriter.cpp | 8
-rw-r--r--  clang/lib/Rewrite/TokenRewriter.cpp | 2
-rw-r--r--  clang/lib/Sema/AnalysisBasedWarnings.cpp | 102
-rw-r--r--  clang/lib/Sema/CodeCompleteConsumer.cpp | 6
-rw-r--r--  clang/lib/Sema/DeclSpec.cpp | 167
-rw-r--r--  clang/lib/Sema/JumpDiagnostics.cpp | 17
-rw-r--r--  clang/lib/Sema/MultiplexExternalSemaSource.cpp | 7
-rw-r--r--  clang/lib/Sema/ScopeInfo.cpp | 1
-rw-r--r--  clang/lib/Sema/Sema.cpp | 163
-rw-r--r--  clang/lib/Sema/SemaAccess.cpp | 3
-rw-r--r--  clang/lib/Sema/SemaAttr.cpp | 345
-rw-r--r--  clang/lib/Sema/SemaCUDA.cpp | 103
-rw-r--r--  clang/lib/Sema/SemaCast.cpp | 81
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 1029
-rw-r--r--  clang/lib/Sema/SemaCodeComplete.cpp | 90
-rw-r--r--  clang/lib/Sema/SemaConcept.cpp | 23
-rw-r--r--  clang/lib/Sema/SemaCoroutine.cpp | 100
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp | 1181
-rw-r--r--  clang/lib/Sema/SemaDeclAttr.cpp | 1060
-rw-r--r--  clang/lib/Sema/SemaDeclCXX.cpp | 225
-rw-r--r--  clang/lib/Sema/SemaDeclObjC.cpp | 76
-rw-r--r--  clang/lib/Sema/SemaExceptionSpec.cpp | 4
-rw-r--r--  clang/lib/Sema/SemaExpr.cpp | 1006
-rw-r--r--  clang/lib/Sema/SemaExprCXX.cpp | 201
-rw-r--r--  clang/lib/Sema/SemaExprMember.cpp | 119
-rw-r--r--  clang/lib/Sema/SemaExprObjC.cpp | 88
-rw-r--r--  clang/lib/Sema/SemaInit.cpp | 356
-rw-r--r--  clang/lib/Sema/SemaLambda.cpp | 145
-rw-r--r--  clang/lib/Sema/SemaLookup.cpp | 378
-rw-r--r--  clang/lib/Sema/SemaModule.cpp | 2
-rw-r--r--  clang/lib/Sema/SemaObjCProperty.cpp | 14
-rw-r--r--  clang/lib/Sema/SemaOpenMP.cpp | 1065
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp | 978
-rw-r--r--  clang/lib/Sema/SemaSYCL.cpp | 20
-rw-r--r--  clang/lib/Sema/SemaStmt.cpp | 217
-rw-r--r--  clang/lib/Sema/SemaStmtAsm.cpp | 6
-rw-r--r--  clang/lib/Sema/SemaStmtAttr.cpp | 78
-rw-r--r--  clang/lib/Sema/SemaTemplate.cpp | 637
-rw-r--r--  clang/lib/Sema/SemaTemplateDeduction.cpp | 379
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiate.cpp | 187
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 421
-rw-r--r--  clang/lib/Sema/SemaTemplateVariadic.cpp | 63
-rw-r--r--  clang/lib/Sema/SemaType.cpp | 1220
-rw-r--r--  clang/lib/Sema/TreeTransform.h | 205
-rw-r--r--  clang/lib/Sema/UsedDeclVisitor.h | 11
-rw-r--r--  clang/lib/Serialization/ASTCommon.cpp | 6
-rw-r--r--  clang/lib/Serialization/ASTReader.cpp | 434
-rw-r--r--  clang/lib/Serialization/ASTReaderDecl.cpp | 166
-rw-r--r--  clang/lib/Serialization/ASTReaderStmt.cpp | 365
-rw-r--r--  clang/lib/Serialization/ASTWriter.cpp | 183
-rw-r--r--  clang/lib/Serialization/ASTWriterDecl.cpp | 59
-rw-r--r--  clang/lib/Serialization/ASTWriterStmt.cpp | 158
-rw-r--r--  clang/lib/Serialization/GlobalModuleIndex.cpp | 7
-rw-r--r--  clang/lib/Serialization/ModuleManager.cpp | 61
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp | 3
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp | 142
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp | 10
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp | 69
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp | 312
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp | 5
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp | 103
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp | 7
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 15
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp | 21
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp | 7
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp | 5
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp | 271
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp | 7
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/SmartPtr.h | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp | 48
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp | 514
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp | 2058
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp | 70
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/Taint.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp | 7
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp | 7
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp | 58
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h | 23
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp | 12
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp | 3
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp | 107
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp | 251
-rw-r--r--[-rwxr-xr-x]  clang/lib/StaticAnalyzer/Checkers/Yaml.h | 0
-rw-r--r--  clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp | 25
-rw-r--r--  clang/lib/StaticAnalyzer/Core/BugReporter.cpp | 13
-rw-r--r--  clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 7
-rw-r--r--  clang/lib/StaticAnalyzer/Core/CallEvent.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Core/CheckerContext.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Core/DynamicType.cpp | 13
-rw-r--r--  clang/lib/StaticAnalyzer/Core/Environment.cpp | 21
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngine.cpp | 153
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp | 7
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 26
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp | 14
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp | 13
-rw-r--r--  clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 47
-rw-r--r--  clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp | 400
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ProgramState.cpp | 3
-rw-r--r--  clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 1114
-rw-r--r--  clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp | 41
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SValBuilder.cpp | 23
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SVals.cpp | 48
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp | 16
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 19
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SymbolManager.cpp | 28
-rw-r--r--  clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp | 49
-rw-r--r--  clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 4
-rw-r--r--  clang/lib/Tooling/AllTUsExecution.cpp | 2
-rw-r--r--  clang/lib/Tooling/ArgumentsAdjusters.cpp | 34
-rw-r--r--  clang/lib/Tooling/CompilationDatabase.cpp | 58
-rw-r--r--  clang/lib/Tooling/Core/Replacement.cpp | 2
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp | 20
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp | 24
-rw-r--r--  clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp | 13
-rw-r--r--  clang/lib/Tooling/FileMatchTrie.cpp | 14
-rw-r--r--  clang/lib/Tooling/Inclusions/HeaderIncludes.cpp | 52
-rw-r--r--  clang/lib/Tooling/Inclusions/IncludeStyle.cpp | 1
-rw-r--r--  clang/lib/Tooling/JSONCompilationDatabase.cpp | 48
-rw-r--r--  clang/lib/Tooling/Refactoring/ASTSelection.cpp | 2
-rw-r--r--  clang/lib/Tooling/Refactoring/Lookup.cpp (renamed from clang/lib/Tooling/Core/Lookup.cpp) | 2
-rw-r--r--  clang/lib/Tooling/Refactoring/RefactoringActions.cpp | 4
-rw-r--r--  clang/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp | 5
-rw-r--r--  clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp | 4
-rw-r--r--  clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp | 52
-rw-r--r--  clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp | 2
-rw-r--r--  clang/lib/Tooling/Syntax/BuildTree.cpp | 819
-rw-r--r--  clang/lib/Tooling/Syntax/ComputeReplacements.cpp | 15
-rw-r--r--  clang/lib/Tooling/Syntax/Mutations.cpp | 36
-rw-r--r--  clang/lib/Tooling/Syntax/Nodes.cpp | 604
-rw-r--r--  clang/lib/Tooling/Syntax/Synthesis.cpp | 224
-rw-r--r--  clang/lib/Tooling/Syntax/Tokens.cpp | 89
-rw-r--r--  clang/lib/Tooling/Tooling.cpp | 71
-rw-r--r--  clang/lib/Tooling/Transformer/Parsing.cpp | 8
-rw-r--r--  clang/lib/Tooling/Transformer/RangeSelector.cpp | 26
-rw-r--r--  clang/lib/Tooling/Transformer/RewriteRule.cpp | 262
-rw-r--r--  clang/lib/Tooling/Transformer/Stencil.cpp | 77
-rw-r--r--  clang/lib/Tooling/Transformer/Transformer.cpp | 42
511 files changed, 51991 insertions, 27154 deletions
diff --git a/clang/lib/APINotes/APINotesFormat.h b/clang/lib/APINotes/APINotesFormat.h
new file mode 100644
index 000000000000..6b76ecfc2567
--- /dev/null
+++ b/clang/lib/APINotes/APINotesFormat.h
@@ -0,0 +1,255 @@
+//===-- APINotesFormat.h - API Notes Format ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_APINOTES_APINOTESFORMAT_H
+#define LLVM_CLANG_LIB_APINOTES_APINOTESFORMAT_H
+
+#include "llvm/ADT/PointerEmbeddedInt.h"
+#include "llvm/Bitcode/BitcodeConvenience.h"
+
+namespace clang {
+namespace api_notes {
+/// Magic number for API notes files.
+const unsigned char API_NOTES_SIGNATURE[] = {0xE2, 0x9C, 0xA8, 0x01};
+
+/// API notes file major version number.
+const uint16_t VERSION_MAJOR = 0;
+
+/// API notes file minor version number.
+///
+/// When the format changes IN ANY WAY, this number should be incremented.
+const uint16_t VERSION_MINOR = 24; // EnumExtensibility + FlagEnum
+
+using IdentifierID = llvm::PointerEmbeddedInt<unsigned, 31>;
+using IdentifierIDField = llvm::BCVBR<16>;
+
+using SelectorID = llvm::PointerEmbeddedInt<unsigned, 31>;
+using SelectorIDField = llvm::BCVBR<16>;
+
+/// The various types of blocks that can occur within an API notes file.
+///
+/// These IDs must \em not be renumbered or reordered without incrementing
+/// VERSION_MAJOR.
+enum BlockID {
+ /// The control block, which contains all of the information that needs to
+ /// be validated prior to committing to loading the API notes file.
+ ///
+ /// \sa control_block
+ CONTROL_BLOCK_ID = llvm::bitc::FIRST_APPLICATION_BLOCKID,
+
+ /// The identifier data block, which maps identifier strings to IDs.
+ IDENTIFIER_BLOCK_ID,
+
+ /// The Objective-C context data block, which contains information about
+ /// Objective-C classes and protocols.
+ OBJC_CONTEXT_BLOCK_ID,
+
+ /// The Objective-C property data block, which maps Objective-C
+ /// (class name, property name) pairs to information about the
+ /// property.
+ OBJC_PROPERTY_BLOCK_ID,
+
+ /// The Objective-C method data block, which maps Objective-C
+ /// (class name, selector, is_instance_method) tuples to information
+ /// about the method.
+ OBJC_METHOD_BLOCK_ID,
+
+ /// The Objective-C selector data block, which maps Objective-C
+ /// selector names (# of pieces, identifier IDs) to the selector ID
+ /// used in other tables.
+ OBJC_SELECTOR_BLOCK_ID,
+
+ /// The global variables data block, which maps global variable names to
+ /// information about the global variable.
+ GLOBAL_VARIABLE_BLOCK_ID,
+
+ /// The (global) functions data block, which maps global function names to
+ /// information about the global function.
+ GLOBAL_FUNCTION_BLOCK_ID,
+
+ /// The tag data block, which maps tag names to information about
+ /// the tags.
+ TAG_BLOCK_ID,
+
+ /// The typedef data block, which maps typedef names to information about
+ /// the typedefs.
+ TYPEDEF_BLOCK_ID,
+
+ /// The enum constant data block, which maps enumerator names to
+ /// information about the enumerators.
+ ENUM_CONSTANT_BLOCK_ID,
+};
+
+namespace control_block {
+// These IDs must \em not be renumbered or reordered without incrementing
+// VERSION_MAJOR.
+enum {
+ METADATA = 1,
+ MODULE_NAME = 2,
+ MODULE_OPTIONS = 3,
+ SOURCE_FILE = 4,
+};
+
+using MetadataLayout =
+ llvm::BCRecordLayout<METADATA, // ID
+ llvm::BCFixed<16>, // Module format major version
+ llvm::BCFixed<16> // Module format minor version
+ >;
+
+using ModuleNameLayout = llvm::BCRecordLayout<MODULE_NAME,
+ llvm::BCBlob // Module name
+ >;
+
+using ModuleOptionsLayout =
+ llvm::BCRecordLayout<MODULE_OPTIONS,
+ llvm::BCFixed<1> // SwiftInferImportAsMember
+ >;
+
+using SourceFileLayout = llvm::BCRecordLayout<SOURCE_FILE,
+ llvm::BCVBR<16>, // file size
+ llvm::BCVBR<16> // creation time
+ >;
+} // namespace control_block
+
+namespace identifier_block {
+enum {
+ IDENTIFIER_DATA = 1,
+};
+
+using IdentifierDataLayout = llvm::BCRecordLayout<
+ IDENTIFIER_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see below)
+ llvm::BCBlob // map from identifier strings to decl kinds / decl IDs
+ >;
+} // namespace identifier_block
+
+namespace objc_context_block {
+enum {
+ OBJC_CONTEXT_ID_DATA = 1,
+ OBJC_CONTEXT_INFO_DATA = 2,
+};
+
+using ObjCContextIDLayout =
+ llvm::BCRecordLayout<OBJC_CONTEXT_ID_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see
+ // below)
+ llvm::BCBlob // map from ObjC class names/protocol (as
+ // IDs) to context IDs
+ >;
+
+using ObjCContextInfoLayout = llvm::BCRecordLayout<
+ OBJC_CONTEXT_INFO_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see below)
+ llvm::BCBlob // map from ObjC context IDs to context information.
+ >;
+} // namespace objc_context_block
+
+namespace objc_property_block {
+enum {
+ OBJC_PROPERTY_DATA = 1,
+};
+
+using ObjCPropertyDataLayout = llvm::BCRecordLayout<
+ OBJC_PROPERTY_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see below)
+ llvm::BCBlob // map from ObjC (class name, property name) pairs to
+ // ObjC property information
+ >;
+} // namespace objc_property_block
+
+namespace objc_method_block {
+enum {
+ OBJC_METHOD_DATA = 1,
+};
+
+using ObjCMethodDataLayout =
+ llvm::BCRecordLayout<OBJC_METHOD_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see
+ // below)
+ llvm::BCBlob // map from ObjC (class names, selector,
+ // is-instance-method) tuples to ObjC
+ // method information
+ >;
+} // namespace objc_method_block
+
+namespace objc_selector_block {
+enum {
+ OBJC_SELECTOR_DATA = 1,
+};
+
+using ObjCSelectorDataLayout =
+ llvm::BCRecordLayout<OBJC_SELECTOR_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see
+ // below)
+ llvm::BCBlob // map from (# pieces, identifier IDs) to
+ // Objective-C selector ID.
+ >;
+} // namespace objc_selector_block
+
+namespace global_variable_block {
+enum { GLOBAL_VARIABLE_DATA = 1 };
+
+using GlobalVariableDataLayout = llvm::BCRecordLayout<
+ GLOBAL_VARIABLE_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see below)
+ llvm::BCBlob // map from name to global variable information
+ >;
+} // namespace global_variable_block
+
+namespace global_function_block {
+enum { GLOBAL_FUNCTION_DATA = 1 };
+
+using GlobalFunctionDataLayout = llvm::BCRecordLayout<
+ GLOBAL_FUNCTION_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see below)
+ llvm::BCBlob // map from name to global function information
+ >;
+} // namespace global_function_block
+
+namespace tag_block {
+enum { TAG_DATA = 1 };
+
+using TagDataLayout =
+ llvm::BCRecordLayout<TAG_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see
+ // below)
+ llvm::BCBlob // map from name to tag information
+ >;
+}; // namespace tag_block
+
+namespace typedef_block {
+enum { TYPEDEF_DATA = 1 };
+
+using TypedefDataLayout =
+ llvm::BCRecordLayout<TYPEDEF_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see
+ // below)
+ llvm::BCBlob // map from name to typedef information
+ >;
+}; // namespace typedef_block
+
+namespace enum_constant_block {
+enum { ENUM_CONSTANT_DATA = 1 };
+
+using EnumConstantDataLayout =
+ llvm::BCRecordLayout<ENUM_CONSTANT_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see
+ // below)
+ llvm::BCBlob // map from name to enumerator information
+ >;
+} // namespace enum_constant_block
+
+/// A stored Objective-C selector.
+struct StoredObjCSelector {
+ unsigned NumPieces;
+ llvm::SmallVector<IdentifierID, 2> Identifiers;
+};
+} // namespace api_notes
+} // namespace clang
+
+#endif
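
For orientation between files: each BCRecordLayout typedef above binds a record ID to a field schema and can emit both the abbreviation and the record through LLVM's bitstream writer. A minimal sketch of emitting the control block's METADATA record, assuming the usual llvm::BitstreamWriter API (the actual writer is not part of this commit, and the abbrev width of 3 is just a plausible choice):

#include "APINotesFormat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Bitstream/BitstreamWriter.h"

// Sketch: enter the control block and emit its METADATA record,
// which carries the (major, minor) format version pair.
static void writeControlBlock(llvm::BitstreamWriter &Stream) {
  llvm::SmallVector<uint64_t, 64> Scratch;
  Stream.EnterSubblock(clang::api_notes::CONTROL_BLOCK_ID, /*CodeLen=*/3);
  clang::api_notes::control_block::MetadataLayout Metadata(Stream);
  Metadata.emit(Scratch, clang::api_notes::VERSION_MAJOR,
                clang::api_notes::VERSION_MINOR);
  Stream.ExitBlock();
}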
diff --git a/clang/lib/APINotes/APINotesTypes.cpp b/clang/lib/APINotes/APINotesTypes.cpp
new file mode 100644
index 000000000000..c0bb726ea72b
--- /dev/null
+++ b/clang/lib/APINotes/APINotesTypes.cpp
@@ -0,0 +1,107 @@
+//===-- APINotesTypes.cpp - API Notes Data Types ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/APINotes/Types.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace api_notes {
+LLVM_DUMP_METHOD void CommonEntityInfo::dump(llvm::raw_ostream &OS) const {
+ if (Unavailable)
+ OS << "[Unavailable] (" << UnavailableMsg << ")" << ' ';
+ if (UnavailableInSwift)
+ OS << "[UnavailableInSwift] ";
+ if (SwiftPrivateSpecified)
+ OS << (SwiftPrivate ? "[SwiftPrivate] " : "");
+ if (!SwiftName.empty())
+ OS << "Swift Name: " << SwiftName << ' ';
+ OS << '\n';
+}
+
+LLVM_DUMP_METHOD void CommonTypeInfo::dump(llvm::raw_ostream &OS) const {
+ static_cast<const CommonEntityInfo &>(*this).dump(OS);
+ if (SwiftBridge)
+ OS << "Swift Bridged Type: " << *SwiftBridge << ' ';
+ if (NSErrorDomain)
+ OS << "NSError Domain: " << *NSErrorDomain << ' ';
+ OS << '\n';
+}
+
+LLVM_DUMP_METHOD void ObjCContextInfo::dump(llvm::raw_ostream &OS) {
+ static_cast<CommonTypeInfo &>(*this).dump(OS);
+ if (HasDefaultNullability)
+ OS << "DefaultNullability: " << DefaultNullability << ' ';
+ if (HasDesignatedInits)
+ OS << "[HasDesignatedInits] ";
+ if (SwiftImportAsNonGenericSpecified)
+ OS << (SwiftImportAsNonGeneric ? "[SwiftImportAsNonGeneric] " : "");
+ if (SwiftObjCMembersSpecified)
+ OS << (SwiftObjCMembers ? "[SwiftObjCMembers] " : "");
+ OS << '\n';
+}
+
+LLVM_DUMP_METHOD void VariableInfo::dump(llvm::raw_ostream &OS) const {
+ static_cast<const CommonEntityInfo &>(*this).dump(OS);
+ if (NullabilityAudited)
+ OS << "Audited Nullability: " << Nullable << ' ';
+ if (!Type.empty())
+ OS << "C Type: " << Type << ' ';
+ OS << '\n';
+}
+
+LLVM_DUMP_METHOD void ObjCPropertyInfo::dump(llvm::raw_ostream &OS) const {
+ static_cast<const VariableInfo &>(*this).dump(OS);
+ if (SwiftImportAsAccessorsSpecified)
+ OS << (SwiftImportAsAccessors ? "[SwiftImportAsAccessors] " : "");
+ OS << '\n';
+}
+
+LLVM_DUMP_METHOD void ParamInfo::dump(llvm::raw_ostream &OS) const {
+ static_cast<const VariableInfo &>(*this).dump(OS);
+ if (NoEscapeSpecified)
+ OS << (NoEscape ? "[NoEscape] " : "");
+ OS << "RawRetainCountConvention: " << RawRetainCountConvention << ' ';
+ OS << '\n';
+}
+
+LLVM_DUMP_METHOD void FunctionInfo::dump(llvm::raw_ostream &OS) const {
+ static_cast<const CommonEntityInfo &>(*this).dump(OS);
+ OS << (NullabilityAudited ? "[NullabilityAudited] " : "")
+ << "RawRetainCountConvention: " << RawRetainCountConvention << ' ';
+ if (!ResultType.empty())
+ OS << "Result Type: " << ResultType << ' ';
+ if (!Params.empty())
+ OS << '\n';
+ for (auto &PI : Params)
+ PI.dump(OS);
+}
+
+LLVM_DUMP_METHOD void ObjCMethodInfo::dump(llvm::raw_ostream &OS) {
+ static_cast<FunctionInfo &>(*this).dump(OS);
+ OS << (DesignatedInit ? "[DesignatedInit] " : "")
+ << (RequiredInit ? "[RequiredInit] " : "") << '\n';
+}
+
+LLVM_DUMP_METHOD void TagInfo::dump(llvm::raw_ostream &OS) {
+ static_cast<CommonTypeInfo &>(*this).dump(OS);
+ if (HasFlagEnum)
+ OS << (IsFlagEnum ? "[FlagEnum] " : "");
+ if (EnumExtensibility)
+ OS << "Enum Extensibility: " << static_cast<long>(*EnumExtensibility)
+ << ' ';
+ OS << '\n';
+}
+
+LLVM_DUMP_METHOD void TypedefInfo::dump(llvm::raw_ostream &OS) const {
+ static_cast<const CommonTypeInfo &>(*this).dump(OS);
+ if (SwiftWrapper)
+ OS << "Swift Type: " << static_cast<long>(*SwiftWrapper) << ' ';
+ OS << '\n';
+}
+} // namespace api_notes
+} // namespace clang
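
These dump routines chain up the Info hierarchy: each derived dump first invokes its base's through a static_cast, so one call prints the whole inherited state. A minimal usage sketch, assuming the public SwiftName field and the setSwiftImportAsAccessors setter declared in clang/APINotes/Types.h:

#include "clang/APINotes/Types.h"
#include "llvm/Support/raw_ostream.h"

// Sketch: populate an ObjCPropertyInfo and dump it to stderr.
static void dumpExample() {
  clang::api_notes::ObjCPropertyInfo Info;
  Info.SwiftName = "caption";           // field inherited from CommonEntityInfo
  Info.setSwiftImportAsAccessors(true); // records the value and marks it set
  Info.dump(llvm::errs());              // prints "Swift Name: caption" etc.
}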
diff --git a/clang/lib/APINotes/APINotesYAMLCompiler.cpp b/clang/lib/APINotes/APINotesYAMLCompiler.cpp
new file mode 100644
index 000000000000..a4120120a01c
--- /dev/null
+++ b/clang/lib/APINotes/APINotesYAMLCompiler.cpp
@@ -0,0 +1,598 @@
+//===-- APINotesYAMLCompiler.cpp - API Notes YAML Format Reader -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The types defined locally are designed to represent the YAML state, which
+// adds an additional bit of state: e.g. a tri-state boolean attribute (yes, no,
+// not applied) becomes a tri-state boolean + present. As a result, while these
+// enumerations appear to be redefining constants from the attributes table
+// data, they are distinct.
+//
+
+#include "clang/APINotes/APINotesYAMLCompiler.h"
+#include "clang/APINotes/Types.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <vector>
+using namespace clang;
+using namespace api_notes;
+
+namespace {
+enum class APIAvailability {
+ Available = 0,
+ OSX,
+ IOS,
+ None,
+ NonSwift,
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<APIAvailability> {
+ static void enumeration(IO &IO, APIAvailability &AA) {
+ IO.enumCase(AA, "OSX", APIAvailability::OSX);
+ IO.enumCase(AA, "iOS", APIAvailability::IOS);
+ IO.enumCase(AA, "none", APIAvailability::None);
+ IO.enumCase(AA, "nonswift", APIAvailability::NonSwift);
+ IO.enumCase(AA, "available", APIAvailability::Available);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+enum class MethodKind {
+ Class,
+ Instance,
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<MethodKind> {
+ static void enumeration(IO &IO, MethodKind &MK) {
+ IO.enumCase(MK, "Class", MethodKind::Class);
+ IO.enumCase(MK, "Instance", MethodKind::Instance);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Param {
+ unsigned Position;
+ Optional<bool> NoEscape = false;
+ Optional<NullabilityKind> Nullability;
+ Optional<RetainCountConventionKind> RetainCountConvention;
+ StringRef Type;
+};
+
+typedef std::vector<Param> ParamsSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Param)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(NullabilityKind)
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<NullabilityKind> {
+ static void enumeration(IO &IO, NullabilityKind &NK) {
+ IO.enumCase(NK, "Nonnull", NullabilityKind::NonNull);
+ IO.enumCase(NK, "Optional", NullabilityKind::Nullable);
+ IO.enumCase(NK, "Unspecified", NullabilityKind::Unspecified);
+ IO.enumCase(NK, "NullableResult", NullabilityKind::NullableResult);
+ // TODO: Mapping this to its own value would allow for better cross
+ // checking. Also the default should be Unknown.
+ IO.enumCase(NK, "Scalar", NullabilityKind::Unspecified);
+
+ // Aliases for compatibility with existing APINotes.
+ IO.enumCase(NK, "N", NullabilityKind::NonNull);
+ IO.enumCase(NK, "O", NullabilityKind::Nullable);
+ IO.enumCase(NK, "U", NullabilityKind::Unspecified);
+ IO.enumCase(NK, "S", NullabilityKind::Unspecified);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<RetainCountConventionKind> {
+ static void enumeration(IO &IO, RetainCountConventionKind &RCCK) {
+ IO.enumCase(RCCK, "none", RetainCountConventionKind::None);
+ IO.enumCase(RCCK, "CFReturnsRetained",
+ RetainCountConventionKind::CFReturnsRetained);
+ IO.enumCase(RCCK, "CFReturnsNotRetained",
+ RetainCountConventionKind::CFReturnsNotRetained);
+ IO.enumCase(RCCK, "NSReturnsRetained",
+ RetainCountConventionKind::NSReturnsRetained);
+ IO.enumCase(RCCK, "NSReturnsNotRetained",
+ RetainCountConventionKind::NSReturnsNotRetained);
+ }
+};
+
+template <> struct MappingTraits<Param> {
+ static void mapping(IO &IO, Param &P) {
+ IO.mapRequired("Position", P.Position);
+ IO.mapOptional("Nullability", P.Nullability, llvm::None);
+ IO.mapOptional("RetainCountConvention", P.RetainCountConvention);
+ IO.mapOptional("NoEscape", P.NoEscape);
+ IO.mapOptional("Type", P.Type, StringRef(""));
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+typedef std::vector<NullabilityKind> NullabilitySeq;
+
+struct AvailabilityItem {
+ APIAvailability Mode = APIAvailability::Available;
+ StringRef Msg;
+};
+
+/// Old attribute deprecated in favor of SwiftName.
+enum class FactoryAsInitKind {
+ /// Infer based on name and type (the default).
+ Infer,
+ /// Treat as a class method.
+ AsClassMethod,
+ /// Treat as an initializer.
+ AsInitializer,
+};
+
+struct Method {
+ StringRef Selector;
+ MethodKind Kind;
+ ParamsSeq Params;
+ NullabilitySeq Nullability;
+ Optional<NullabilityKind> NullabilityOfRet;
+ Optional<RetainCountConventionKind> RetainCountConvention;
+ AvailabilityItem Availability;
+ Optional<bool> SwiftPrivate;
+ StringRef SwiftName;
+ FactoryAsInitKind FactoryAsInit = FactoryAsInitKind::Infer;
+ bool DesignatedInit = false;
+ bool Required = false;
+ StringRef ResultType;
+};
+
+typedef std::vector<Method> MethodsSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Method)
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<FactoryAsInitKind> {
+ static void enumeration(IO &IO, FactoryAsInitKind &FIK) {
+ IO.enumCase(FIK, "A", FactoryAsInitKind::Infer);
+ IO.enumCase(FIK, "C", FactoryAsInitKind::AsClassMethod);
+ IO.enumCase(FIK, "I", FactoryAsInitKind::AsInitializer);
+ }
+};
+
+template <> struct MappingTraits<Method> {
+ static void mapping(IO &IO, Method &M) {
+ IO.mapRequired("Selector", M.Selector);
+ IO.mapRequired("MethodKind", M.Kind);
+ IO.mapOptional("Parameters", M.Params);
+ IO.mapOptional("Nullability", M.Nullability);
+ IO.mapOptional("NullabilityOfRet", M.NullabilityOfRet, llvm::None);
+ IO.mapOptional("RetainCountConvention", M.RetainCountConvention);
+ IO.mapOptional("Availability", M.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", M.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", M.SwiftPrivate);
+ IO.mapOptional("SwiftName", M.SwiftName, StringRef(""));
+ IO.mapOptional("FactoryAsInit", M.FactoryAsInit, FactoryAsInitKind::Infer);
+ IO.mapOptional("DesignatedInit", M.DesignatedInit, false);
+ IO.mapOptional("Required", M.Required, false);
+ IO.mapOptional("ResultType", M.ResultType, StringRef(""));
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Property {
+ StringRef Name;
+ llvm::Optional<MethodKind> Kind;
+ llvm::Optional<NullabilityKind> Nullability;
+ AvailabilityItem Availability;
+ Optional<bool> SwiftPrivate;
+ StringRef SwiftName;
+ Optional<bool> SwiftImportAsAccessors;
+ StringRef Type;
+};
+
+typedef std::vector<Property> PropertiesSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Property)
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<Property> {
+ static void mapping(IO &IO, Property &P) {
+ IO.mapRequired("Name", P.Name);
+ IO.mapOptional("PropertyKind", P.Kind);
+ IO.mapOptional("Nullability", P.Nullability, llvm::None);
+ IO.mapOptional("Availability", P.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", P.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", P.SwiftPrivate);
+ IO.mapOptional("SwiftName", P.SwiftName, StringRef(""));
+ IO.mapOptional("SwiftImportAsAccessors", P.SwiftImportAsAccessors);
+ IO.mapOptional("Type", P.Type, StringRef(""));
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Class {
+ StringRef Name;
+ bool AuditedForNullability = false;
+ AvailabilityItem Availability;
+ Optional<bool> SwiftPrivate;
+ StringRef SwiftName;
+ Optional<StringRef> SwiftBridge;
+ Optional<StringRef> NSErrorDomain;
+ Optional<bool> SwiftImportAsNonGeneric;
+ Optional<bool> SwiftObjCMembers;
+ MethodsSeq Methods;
+ PropertiesSeq Properties;
+};
+
+typedef std::vector<Class> ClassesSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Class)
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<Class> {
+ static void mapping(IO &IO, Class &C) {
+ IO.mapRequired("Name", C.Name);
+ IO.mapOptional("AuditedForNullability", C.AuditedForNullability, false);
+ IO.mapOptional("Availability", C.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", C.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", C.SwiftPrivate);
+ IO.mapOptional("SwiftName", C.SwiftName, StringRef(""));
+ IO.mapOptional("SwiftBridge", C.SwiftBridge);
+ IO.mapOptional("NSErrorDomain", C.NSErrorDomain);
+ IO.mapOptional("SwiftImportAsNonGeneric", C.SwiftImportAsNonGeneric);
+ IO.mapOptional("SwiftObjCMembers", C.SwiftObjCMembers);
+ IO.mapOptional("Methods", C.Methods);
+ IO.mapOptional("Properties", C.Properties);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Function {
+ StringRef Name;
+ ParamsSeq Params;
+ NullabilitySeq Nullability;
+ Optional<NullabilityKind> NullabilityOfRet;
+ Optional<api_notes::RetainCountConventionKind> RetainCountConvention;
+ AvailabilityItem Availability;
+ Optional<bool> SwiftPrivate;
+ StringRef SwiftName;
+ StringRef Type;
+ StringRef ResultType;
+};
+
+typedef std::vector<Function> FunctionsSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Function)
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<Function> {
+ static void mapping(IO &IO, Function &F) {
+ IO.mapRequired("Name", F.Name);
+ IO.mapOptional("Parameters", F.Params);
+ IO.mapOptional("Nullability", F.Nullability);
+ IO.mapOptional("NullabilityOfRet", F.NullabilityOfRet, llvm::None);
+ IO.mapOptional("RetainCountConvention", F.RetainCountConvention);
+ IO.mapOptional("Availability", F.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", F.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", F.SwiftPrivate);
+ IO.mapOptional("SwiftName", F.SwiftName, StringRef(""));
+ IO.mapOptional("ResultType", F.ResultType, StringRef(""));
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct GlobalVariable {
+ StringRef Name;
+ llvm::Optional<NullabilityKind> Nullability;
+ AvailabilityItem Availability;
+ Optional<bool> SwiftPrivate;
+ StringRef SwiftName;
+ StringRef Type;
+};
+
+typedef std::vector<GlobalVariable> GlobalVariablesSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(GlobalVariable)
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<GlobalVariable> {
+ static void mapping(IO &IO, GlobalVariable &GV) {
+ IO.mapRequired("Name", GV.Name);
+ IO.mapOptional("Nullability", GV.Nullability, llvm::None);
+ IO.mapOptional("Availability", GV.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", GV.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", GV.SwiftPrivate);
+ IO.mapOptional("SwiftName", GV.SwiftName, StringRef(""));
+ IO.mapOptional("Type", GV.Type, StringRef(""));
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct EnumConstant {
+ StringRef Name;
+ AvailabilityItem Availability;
+ Optional<bool> SwiftPrivate;
+ StringRef SwiftName;
+};
+
+typedef std::vector<EnumConstant> EnumConstantsSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(EnumConstant)
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<EnumConstant> {
+ static void mapping(IO &IO, EnumConstant &EC) {
+ IO.mapRequired("Name", EC.Name);
+ IO.mapOptional("Availability", EC.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", EC.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", EC.SwiftPrivate);
+ IO.mapOptional("SwiftName", EC.SwiftName, StringRef(""));
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+/// Syntactic sugar for EnumExtensibility and FlagEnum
+enum class EnumConvenienceAliasKind {
+ /// EnumExtensibility: none, FlagEnum: false
+ None,
+ /// EnumExtensibility: open, FlagEnum: false
+ CFEnum,
+ /// EnumExtensibility: open, FlagEnum: true
+ CFOptions,
+ /// EnumExtensibility: closed, FlagEnum: false
+ CFClosedEnum
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<EnumConvenienceAliasKind> {
+ static void enumeration(IO &IO, EnumConvenienceAliasKind &ECAK) {
+ IO.enumCase(ECAK, "none", EnumConvenienceAliasKind::None);
+ IO.enumCase(ECAK, "CFEnum", EnumConvenienceAliasKind::CFEnum);
+ IO.enumCase(ECAK, "NSEnum", EnumConvenienceAliasKind::CFEnum);
+ IO.enumCase(ECAK, "CFOptions", EnumConvenienceAliasKind::CFOptions);
+ IO.enumCase(ECAK, "NSOptions", EnumConvenienceAliasKind::CFOptions);
+ IO.enumCase(ECAK, "CFClosedEnum", EnumConvenienceAliasKind::CFClosedEnum);
+ IO.enumCase(ECAK, "NSClosedEnum", EnumConvenienceAliasKind::CFClosedEnum);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Tag {
+ StringRef Name;
+ AvailabilityItem Availability;
+ StringRef SwiftName;
+ Optional<bool> SwiftPrivate;
+ Optional<StringRef> SwiftBridge;
+ Optional<StringRef> NSErrorDomain;
+ Optional<EnumExtensibilityKind> EnumExtensibility;
+ Optional<bool> FlagEnum;
+ Optional<EnumConvenienceAliasKind> EnumConvenienceKind;
+};
+
+typedef std::vector<Tag> TagsSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Tag)
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<EnumExtensibilityKind> {
+ static void enumeration(IO &IO, EnumExtensibilityKind &EEK) {
+ IO.enumCase(EEK, "none", EnumExtensibilityKind::None);
+ IO.enumCase(EEK, "open", EnumExtensibilityKind::Open);
+ IO.enumCase(EEK, "closed", EnumExtensibilityKind::Closed);
+ }
+};
+
+template <> struct MappingTraits<Tag> {
+ static void mapping(IO &IO, Tag &T) {
+ IO.mapRequired("Name", T.Name);
+ IO.mapOptional("Availability", T.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", T.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", T.SwiftPrivate);
+ IO.mapOptional("SwiftName", T.SwiftName, StringRef(""));
+ IO.mapOptional("SwiftBridge", T.SwiftBridge);
+ IO.mapOptional("NSErrorDomain", T.NSErrorDomain);
+ IO.mapOptional("EnumExtensibility", T.EnumExtensibility);
+ IO.mapOptional("FlagEnum", T.FlagEnum);
+ IO.mapOptional("EnumKind", T.EnumConvenienceKind);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Typedef {
+ StringRef Name;
+ AvailabilityItem Availability;
+ StringRef SwiftName;
+ Optional<bool> SwiftPrivate;
+ Optional<StringRef> SwiftBridge;
+ Optional<StringRef> NSErrorDomain;
+ Optional<SwiftNewTypeKind> SwiftType;
+};
+
+typedef std::vector<Typedef> TypedefsSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Typedef)
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<SwiftNewTypeKind> {
+ static void enumeration(IO &IO, SwiftNewTypeKind &SWK) {
+ IO.enumCase(SWK, "none", SwiftNewTypeKind::None);
+ IO.enumCase(SWK, "struct", SwiftNewTypeKind::Struct);
+ IO.enumCase(SWK, "enum", SwiftNewTypeKind::Enum);
+ }
+};
+
+template <> struct MappingTraits<Typedef> {
+ static void mapping(IO &IO, Typedef &T) {
+ IO.mapRequired("Name", T.Name);
+ IO.mapOptional("Availability", T.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", T.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", T.SwiftPrivate);
+ IO.mapOptional("SwiftName", T.SwiftName, StringRef(""));
+ IO.mapOptional("SwiftBridge", T.SwiftBridge);
+ IO.mapOptional("NSErrorDomain", T.NSErrorDomain);
+ IO.mapOptional("SwiftWrapper", T.SwiftType);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct TopLevelItems {
+ ClassesSeq Classes;
+ ClassesSeq Protocols;
+ FunctionsSeq Functions;
+ GlobalVariablesSeq Globals;
+ EnumConstantsSeq EnumConstants;
+ TagsSeq Tags;
+ TypedefsSeq Typedefs;
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+static void mapTopLevelItems(IO &IO, TopLevelItems &TLI) {
+ IO.mapOptional("Classes", TLI.Classes);
+ IO.mapOptional("Protocols", TLI.Protocols);
+ IO.mapOptional("Functions", TLI.Functions);
+ IO.mapOptional("Globals", TLI.Globals);
+ IO.mapOptional("Enumerators", TLI.EnumConstants);
+ IO.mapOptional("Tags", TLI.Tags);
+ IO.mapOptional("Typedefs", TLI.Typedefs);
+}
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Versioned {
+ VersionTuple Version;
+ TopLevelItems Items;
+};
+
+typedef std::vector<Versioned> VersionedSeq;
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Versioned)
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<Versioned> {
+ static void mapping(IO &IO, Versioned &V) {
+ IO.mapRequired("Version", V.Version);
+ mapTopLevelItems(IO, V.Items);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+struct Module {
+ StringRef Name;
+ AvailabilityItem Availability;
+ TopLevelItems TopLevel;
+ VersionedSeq SwiftVersions;
+
+ llvm::Optional<bool> SwiftInferImportAsMember = {llvm::None};
+
+ LLVM_DUMP_METHOD void dump() /*const*/;
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<Module> {
+ static void mapping(IO &IO, Module &M) {
+ IO.mapRequired("Name", M.Name);
+ IO.mapOptional("Availability", M.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", M.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftInferImportAsMember", M.SwiftInferImportAsMember);
+ mapTopLevelItems(IO, M.TopLevel);
+ IO.mapOptional("SwiftVersions", M.SwiftVersions);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+void Module::dump() {
+ llvm::yaml::Output OS(llvm::errs());
+ OS << *this;
+}
+
+namespace {
+bool parseAPINotes(StringRef YI, Module &M, llvm::SourceMgr::DiagHandlerTy Diag,
+ void *DiagContext) {
+ llvm::yaml::Input IS(YI, nullptr, Diag, DiagContext);
+ IS >> M;
+ return static_cast<bool>(IS.error());
+}
+} // namespace
+
+bool clang::api_notes::parseAndDumpAPINotes(StringRef YI,
+ llvm::raw_ostream &OS) {
+ Module M;
+ if (parseAPINotes(YI, M, nullptr, nullptr))
+ return true;
+
+ llvm::yaml::Output YOS(OS);
+ YOS << M;
+
+ return false;
+}
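
To make the schema these traits define concrete, here is a small, hypothetical API notes document; the module and symbol names are invented, but every key corresponds to a mapRequired/mapOptional call above:

Name: SomeKit
Classes:
  - Name: SKWidget
    AuditedForNullability: true
    Methods:
      - Selector: "initWithTitle:"
        MethodKind: Instance
        DesignatedInit: true
        Nullability: [ N ]      # one entry per parameter
        NullabilityOfRet: N
    Properties:
      - Name: title
        PropertyKind: Instance
        Nullability: O          # alias for Optional
        SwiftName: caption
Functions:
  - Name: SKWidgetCreate
    RetainCountConvention: CFReturnsRetained
    Availability: nonswift
Tags:
  - Name: SKWidgetStyle
    EnumKind: NSOptions         # sugar for EnumExtensibility open + FlagEnum
SwiftVersions:
  - Version: 4.2
    Classes:
      - Name: SKWidget
        SwiftName: Widget

Feeding such a document to parseAndDumpAPINotes round-trips it through the Module mapping above and prints the normalized form, which makes it easy to see which of the tri-state Optional fields were actually specified.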
diff --git a/clang/lib/ARCMigrate/ARCMT.cpp b/clang/lib/ARCMigrate/ARCMT.cpp
index e18def8a0b19..36fbe90e1e3a 100644
--- a/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/clang/lib/ARCMigrate/ARCMT.cpp
@@ -416,9 +416,11 @@ bool arcmt::getFileRemappings(std::vector<std::pair<std::string,std::string> > &
if (err)
return true;
- PreprocessorOptions PPOpts;
- remapper.applyMappings(PPOpts);
- remap = PPOpts.RemappedFiles;
+ remapper.forEachMapping(
+ [&](StringRef From, StringRef To) {
+ remap.push_back(std::make_pair(From.str(), To.str()));
+ },
+ [](StringRef, const llvm::MemoryBufferRef &) {});
return false;
}
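
This hunk stops building a throwaway PreprocessorOptions just to read back its RemappedFiles: the remapper now hands each mapping to a callback. `llvm::function_ref` is the idiomatic parameter type for such visit-only callbacks, since it is a cheap, non-owning view of any callable. A hedged sketch of the shape, with invented names standing in for the clang API:

```cpp
#include "llvm/ADT/STLExtras.h" // llvm::function_ref
#include "llvm/ADT/StringRef.h"
#include <string>
#include <utility>
#include <vector>

// Stand-in declaration for FileRemapper::forEachMapping's file-to-file case.
void forEachPair(llvm::function_ref<void(llvm::StringRef, llvm::StringRef)> F);

std::vector<std::pair<std::string, std::string>> collectRemappings() {
  std::vector<std::pair<std::string, std::string>> Remap;
  forEachPair([&](llvm::StringRef From, llvm::StringRef To) {
    // The StringRefs need not outlive the call, so copy into owned strings.
    Remap.emplace_back(From.str(), To.str());
  });
  return Remap;
}
```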
diff --git a/clang/lib/ARCMigrate/FileRemapper.cpp b/clang/lib/ARCMigrate/FileRemapper.cpp
index 0222583c015b..f536af1795ed 100644
--- a/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -190,6 +190,21 @@ bool FileRemapper::overwriteOriginal(DiagnosticsEngine &Diag,
return false;
}
+void FileRemapper::forEachMapping(
+ llvm::function_ref<void(StringRef, StringRef)> CaptureFile,
+ llvm::function_ref<void(StringRef, const llvm::MemoryBufferRef &)>
+ CaptureBuffer) const {
+ for (auto &Mapping : FromToMappings) {
+ if (const FileEntry *FE = Mapping.second.dyn_cast<const FileEntry *>()) {
+ CaptureFile(Mapping.first->getName(), FE->getName());
+ continue;
+ }
+ CaptureBuffer(
+ Mapping.first->getName(),
+ Mapping.second.get<llvm::MemoryBuffer *>()->getMemBufferRef());
+ }
+}
+
void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const {
for (MappingsTy::const_iterator
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
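
The new `forEachMapping` dispatches on the mapping's target, which (judging from the `dyn_cast`/`get` pair above) is a PointerUnion of a `FileEntry *` and a `MemoryBuffer *`. The dispatch idiom in isolation, with invented payload types:

```cpp
#include "llvm/ADT/PointerUnion.h"

struct OnDisk { const char *Path; };
struct InMemory { const char *Bytes; };

// dyn_cast<T*>() yields null when the union holds the other member;
// get<T*>() asserts the union really does hold a T*.
const char *describe(llvm::PointerUnion<OnDisk *, InMemory *> U) {
  if (OnDisk *D = U.dyn_cast<OnDisk *>())
    return D->Path;
  return U.get<InMemory *>()->Bytes;
}
```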
diff --git a/clang/lib/ARCMigrate/ObjCMT.cpp b/clang/lib/ARCMigrate/ObjCMT.cpp
index 51c4a460cc25..68a51a49c718 100644
--- a/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -156,7 +156,7 @@ protected:
return WhiteListFilenames.find(llvm::sys::path::filename(Path))
!= WhiteListFilenames.end();
}
- bool canModifyFile(const FileEntry *FE) {
+ bool canModifyFile(Optional<FileEntryRef> FE) {
if (!FE)
return false;
return canModifyFile(FE->getName());
@@ -164,7 +164,7 @@ protected:
bool canModifyFile(FileID FID) {
if (FID.isInvalid())
return false;
- return canModifyFile(PP.getSourceManager().getFileEntryForID(FID));
+ return canModifyFile(PP.getSourceManager().getFileEntryRefForID(FID));
}
bool canModify(const Decl *D) {
@@ -1964,7 +1964,7 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
FileID FID = I->first;
RewriteBuffer &buf = I->second;
- const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ Optional<FileEntryRef> file = Ctx.getSourceManager().getFileEntryRefForID(FID);
assert(file);
SmallString<512> newText;
llvm::raw_svector_ostream vecOS(newText);
@@ -2034,12 +2034,10 @@ MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
namespace {
struct EditEntry {
- const FileEntry *File;
- unsigned Offset;
- unsigned RemoveLen;
+ Optional<FileEntryRef> File;
+ unsigned Offset = 0;
+ unsigned RemoveLen = 0;
std::string Text;
-
- EditEntry() : File(), Offset(), RemoveLen() {}
};
} // end anonymous namespace
@@ -2056,12 +2054,8 @@ template<> struct DenseMapInfo<EditEntry> {
return Entry;
}
static unsigned getHashValue(const EditEntry& Val) {
- llvm::FoldingSetNodeID ID;
- ID.AddPointer(Val.File);
- ID.AddInteger(Val.Offset);
- ID.AddInteger(Val.RemoveLen);
- ID.AddString(Val.Text);
- return ID.ComputeHash();
+ return (unsigned)llvm::hash_combine(Val.File, Val.Offset, Val.RemoveLen,
+ Val.Text);
}
static bool isEqual(const EditEntry &LHS, const EditEntry &RHS) {
return LHS.File == RHS.File &&
@@ -2133,9 +2127,8 @@ private:
StringRef Val = ValueString->getValue(ValueStorage);
if (Key == "file") {
- auto FE = FileMgr.getFile(Val);
- if (FE)
- Entry.File = *FE;
+ if (auto File = FileMgr.getOptionalFileRef(Val))
+ Entry.File = File;
else
Ignore = true;
} else if (Key == "offset") {
@@ -2161,7 +2154,7 @@ static bool reportDiag(const Twine &Err, DiagnosticsEngine &Diag) {
return true;
}
-static std::string applyEditsToTemp(const FileEntry *FE,
+static std::string applyEditsToTemp(FileEntryRef FE,
ArrayRef<EditEntry> Edits,
FileManager &FileMgr,
DiagnosticsEngine &Diag) {
@@ -2205,8 +2198,8 @@ static std::string applyEditsToTemp(const FileEntry *FE,
SmallString<64> TempPath;
int FD;
- if (fs::createTemporaryFile(path::filename(FE->getName()),
- path::extension(FE->getName()).drop_front(), FD,
+ if (fs::createTemporaryFile(path::filename(FE.getName()),
+ path::extension(FE.getName()).drop_front(), FD,
TempPath)) {
reportDiag("Could not create file: " + TempPath.str(), Diag);
return std::string();
@@ -2234,7 +2227,7 @@ bool arcmt::getFileRemappingsFromFileList(
new DiagnosticsEngine(DiagID, new DiagnosticOptions,
DiagClient, /*ShouldOwnClient=*/false));
- typedef llvm::DenseMap<const FileEntry *, std::vector<EditEntry> >
+ typedef llvm::DenseMap<FileEntryRef, std::vector<EditEntry> >
FileEditEntriesTy;
FileEditEntriesTy FileEditEntries;
@@ -2256,7 +2249,7 @@ bool arcmt::getFileRemappingsFromFileList(
if (!Insert.second)
continue;
- FileEditEntries[Entry.File].push_back(Entry);
+ FileEditEntries[*Entry.File].push_back(Entry);
}
}
@@ -2269,7 +2262,7 @@ bool arcmt::getFileRemappingsFromFileList(
continue;
}
- remap.emplace_back(std::string(I->first->getName()), TempFile);
+ remap.emplace_back(std::string(I->first.getName()), TempFile);
}
return hasErrorOccurred;
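
Two independent modernizations land in this file: EditEntry's file moves from `const FileEntry *` to `Optional<FileEntryRef>` (which preserves the spelling of the path used to reach the file), and its hash drops the FoldingSetNodeID round-trip for a single `llvm::hash_combine` call. The hash_combine-based DenseMapInfo pattern, sketched on an invented key type:

```cpp
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include <string>

struct Key {
  unsigned Offset = 0;
  std::string Text;
};

namespace llvm {
template <> struct DenseMapInfo<Key> {
  // Sentinel keys that cannot collide with real entries.
  static Key getEmptyKey() { return {~0u, ""}; }
  static Key getTombstoneKey() { return {~0u - 1, ""}; }
  static unsigned getHashValue(const Key &K) {
    return (unsigned)hash_combine(K.Offset, K.Text); // one call, no FoldingSet
  }
  static bool isEqual(const Key &L, const Key &R) {
    return L.Offset == R.Offset && L.Text == R.Text;
  }
};
} // namespace llvm
```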
diff --git a/clang/lib/ARCMigrate/TransGCAttrs.cpp b/clang/lib/ARCMigrate/TransGCAttrs.cpp
index 8f5f3cff17cb..99a61e0842a7 100644
--- a/clang/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -88,8 +88,8 @@ public:
return false;
SourceLocation Loc = OwnershipAttr->getLocation();
- unsigned RawLoc = Loc.getRawEncoding();
- if (MigrateCtx.AttrSet.count(RawLoc))
+ SourceLocation OrigLoc = Loc;
+ if (MigrateCtx.AttrSet.count(OrigLoc))
return true;
ASTContext &Ctx = MigrateCtx.Pass.Ctx;
@@ -105,7 +105,7 @@ public:
else
return false;
- MigrateCtx.AttrSet.insert(RawLoc);
+ MigrateCtx.AttrSet.insert(OrigLoc);
MigrateCtx.GCAttrs.push_back(MigrationContext::GCAttrOccurrence());
MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs.back();
@@ -204,7 +204,7 @@ static void checkWeakGCAttrs(MigrationContext &MigrateCtx) {
if (!canApplyWeak(MigrateCtx.Pass.Ctx, Attr.ModifiedType,
/*AllowOnUnknownClass=*/true)) {
Transaction Trans(TA);
- if (!MigrateCtx.RemovedAttrSet.count(Attr.Loc.getRawEncoding()))
+ if (!MigrateCtx.RemovedAttrSet.count(Attr.Loc))
TA.replaceText(Attr.Loc, "__weak", "__unsafe_unretained");
TA.clearDiagnostic(diag::err_arc_weak_no_runtime,
diag::err_arc_unsupported_weak_class,
@@ -262,7 +262,7 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
if (GCAttrsCollector::hasObjCImpl(
cast<Decl>(IndProps.front()->getDeclContext()))) {
if (hasWeak)
- MigrateCtx.AtPropsWeak.insert(AtLoc.getRawEncoding());
+ MigrateCtx.AtPropsWeak.insert(AtLoc);
} else {
StringRef toAttr = "strong";
@@ -289,14 +289,14 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
TA.clearDiagnostic(diag::err_objc_property_attr_mutually_exclusive, AtLoc);
TA.clearDiagnostic(diag::err_arc_inconsistent_property_ownership,
ATLs[i].second->getLocation());
- MigrateCtx.RemovedAttrSet.insert(Loc.getRawEncoding());
+ MigrateCtx.RemovedAttrSet.insert(Loc);
}
}
static void checkAllProps(MigrationContext &MigrateCtx,
std::vector<ObjCPropertyDecl *> &AllProps) {
typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy;
- llvm::DenseMap<unsigned, IndivPropsTy> AtProps;
+ llvm::DenseMap<SourceLocation, IndivPropsTy> AtProps;
for (unsigned i = 0, e = AllProps.size(); i != e; ++i) {
ObjCPropertyDecl *PD = AllProps[i];
@@ -306,14 +306,12 @@ static void checkAllProps(MigrationContext &MigrateCtx,
SourceLocation AtLoc = PD->getAtLoc();
if (AtLoc.isInvalid())
continue;
- unsigned RawAt = AtLoc.getRawEncoding();
- AtProps[RawAt].push_back(PD);
+ AtProps[AtLoc].push_back(PD);
}
}
- for (llvm::DenseMap<unsigned, IndivPropsTy>::iterator
- I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
- SourceLocation AtLoc = SourceLocation::getFromRawEncoding(I->first);
+ for (auto I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
+ SourceLocation AtLoc = I->first;
IndivPropsTy &IndProps = I->second;
checkAllAtProps(MigrateCtx, AtLoc, IndProps);
}
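
Every hunk in this file is the same mechanical substitution: SourceLocation itself becomes the DenseSet/DenseMap key instead of shuttling through `getRawEncoding()` and `getFromRawEncoding()`. This presumes a `DenseMapInfo<SourceLocation>` specialization, which landed upstream alongside this series; with it in place, deduplication reads directly:

```cpp
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseSet.h"

// Returns true only on the first visit of Loc; relies on the
// DenseMapInfo<SourceLocation> specialization this patch assumes.
bool firstVisit(llvm::DenseSet<clang::SourceLocation> &Seen,
                clang::SourceLocation Loc) {
  return Seen.insert(Loc).second;
}
```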
diff --git a/clang/lib/ARCMigrate/TransProperties.cpp b/clang/lib/ARCMigrate/TransProperties.cpp
index cba2256ef97b..e5ccf1cf79b1 100644
--- a/clang/lib/ARCMigrate/TransProperties.cpp
+++ b/clang/lib/ARCMigrate/TransProperties.cpp
@@ -65,7 +65,7 @@ class PropertiesRewriter {
};
typedef SmallVector<PropData, 2> PropsTy;
- typedef std::map<unsigned, PropsTy> AtPropDeclsTy;
+ typedef std::map<SourceLocation, PropsTy> AtPropDeclsTy;
AtPropDeclsTy AtProps;
llvm::DenseMap<IdentifierInfo *, PropActionKind> ActionOnProp;
@@ -76,13 +76,13 @@ public:
static void collectProperties(ObjCContainerDecl *D, AtPropDeclsTy &AtProps,
AtPropDeclsTy *PrevAtProps = nullptr) {
for (auto *Prop : D->instance_properties()) {
- if (Prop->getAtLoc().isInvalid())
+ SourceLocation Loc = Prop->getAtLoc();
+ if (Loc.isInvalid())
continue;
- unsigned RawLoc = Prop->getAtLoc().getRawEncoding();
if (PrevAtProps)
- if (PrevAtProps->find(RawLoc) != PrevAtProps->end())
+ if (PrevAtProps->find(Loc) != PrevAtProps->end())
continue;
- PropsTy &props = AtProps[RawLoc];
+ PropsTy &props = AtProps[Loc];
props.push_back(Prop);
}
}
@@ -113,8 +113,7 @@ public:
ObjCIvarDecl *ivarD = implD->getPropertyIvarDecl();
if (!ivarD || ivarD->isInvalidDecl())
continue;
- unsigned rawAtLoc = propD->getAtLoc().getRawEncoding();
- AtPropDeclsTy::iterator findAtLoc = AtProps.find(rawAtLoc);
+ AtPropDeclsTy::iterator findAtLoc = AtProps.find(propD->getAtLoc());
if (findAtLoc == AtProps.end())
continue;
@@ -130,7 +129,7 @@ public:
for (AtPropDeclsTy::iterator
I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
- SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
+ SourceLocation atLoc = I->first;
PropsTy &props = I->second;
if (!getPropertyType(props)->isObjCRetainableType())
continue;
@@ -338,7 +337,7 @@ private:
return false;
if (props.empty())
return false;
- return MigrateCtx.AtPropsWeak.count(atLoc.getRawEncoding());
+ return MigrateCtx.AtPropsWeak.count(atLoc);
}
bool isUserDeclared(ObjCIvarDecl *ivarD) const {
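
The same substitution here, except the container is std::map, so the key needs only an `operator<`; SourceLocation already defines one (ordering by raw encoding), which is why the typedef swap requires no custom comparator. A small sketch, with an `int` payload standing in for PropsTy:

```cpp
#include "clang/Basic/SourceLocation.h"
#include <map>
#include <vector>

// Group items by their '@' location, as collectProperties does above.
using ByAtLoc = std::map<clang::SourceLocation, std::vector<int>>;

void addItem(ByAtLoc &Groups, clang::SourceLocation AtLoc, int Item) {
  if (AtLoc.isValid()) // skip invalid locations, as the patch does
    Groups[AtLoc].push_back(Item);
}
```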
diff --git a/clang/lib/ARCMigrate/Transforms.h b/clang/lib/ARCMigrate/Transforms.h
index e087136f0e2c..37e2d6b2a7e1 100644
--- a/clang/lib/ARCMigrate/Transforms.h
+++ b/clang/lib/ARCMigrate/Transforms.h
@@ -93,12 +93,12 @@ public:
bool FullyMigratable;
};
std::vector<GCAttrOccurrence> GCAttrs;
- llvm::DenseSet<unsigned> AttrSet;
- llvm::DenseSet<unsigned> RemovedAttrSet;
+ llvm::DenseSet<SourceLocation> AttrSet;
+ llvm::DenseSet<SourceLocation> RemovedAttrSet;
/// Set of raw '@' locations for 'assign' properties group that contain
/// GC __weak.
- llvm::DenseSet<unsigned> AtPropsWeak;
+ llvm::DenseSet<SourceLocation> AtPropsWeak;
explicit MigrationContext(MigrationPass &pass) : Pass(pass) {}
~MigrationContext();
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp
index f3828bb54c1d..9a9233bc1ea7 100644
--- a/clang/lib/AST/APValue.cpp
+++ b/clang/lib/AST/APValue.cpp
@@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
+#include "Linkage.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/Type.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -38,7 +40,7 @@ static_assert(
"Type is insufficiently aligned");
APValue::LValueBase::LValueBase(const ValueDecl *P, unsigned I, unsigned V)
- : Ptr(P), Local{I, V} {}
+ : Ptr(P ? cast<ValueDecl>(P->getCanonicalDecl()) : nullptr), Local{I, V} {}
APValue::LValueBase::LValueBase(const Expr *P, unsigned I, unsigned V)
: Ptr(P), Local{I, V} {}
@@ -58,6 +60,51 @@ APValue::LValueBase APValue::LValueBase::getTypeInfo(TypeInfoLValue LV,
return Base;
}
+QualType APValue::LValueBase::getType() const {
+ if (!*this) return QualType();
+ if (const ValueDecl *D = dyn_cast<const ValueDecl*>()) {
+ // FIXME: It's unclear where we're supposed to take the type from, and
+  // this actually matters for arrays of unknown bound. E.g.:
+ //
+ // extern int arr[]; void f() { extern int arr[3]; };
+ // constexpr int *p = &arr[1]; // valid?
+ //
+ // For now, we take the most complete type we can find.
+ for (auto *Redecl = cast<ValueDecl>(D->getMostRecentDecl()); Redecl;
+ Redecl = cast_or_null<ValueDecl>(Redecl->getPreviousDecl())) {
+ QualType T = Redecl->getType();
+ if (!T->isIncompleteArrayType())
+ return T;
+ }
+ return D->getType();
+ }
+
+ if (is<TypeInfoLValue>())
+ return getTypeInfoType();
+
+ if (is<DynamicAllocLValue>())
+ return getDynamicAllocType();
+
+ const Expr *Base = get<const Expr*>();
+
+ // For a materialized temporary, the type of the temporary we materialized
+ // may not be the type of the expression.
+ if (const MaterializeTemporaryExpr *MTE =
+ clang::dyn_cast<MaterializeTemporaryExpr>(Base)) {
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ const Expr *Temp = MTE->getSubExpr();
+ const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs,
+ Adjustments);
+ // Keep any cv-qualifiers from the reference if we generated a temporary
+ // for it directly. Otherwise use the type after adjustment.
+ if (!Adjustments.empty())
+ return Inner->getType();
+ }
+
+ return Base->getType();
+}
+
unsigned APValue::LValueBase::getCallIndex() const {
return (is<TypeInfoLValue>() || is<DynamicAllocLValue>()) ? 0
: Local.CallIndex;
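
The FIXME in the new getType() is easiest to read as the C++ being evaluated rather than as compiler code. Spelling out the comment's own example:

```cpp
extern int arr[];    // declared with the incomplete type 'int[]'
void f() {
  extern int arr[3]; // a redeclaration that completes the bound
}
// Which declared type should the constant evaluator use for '&arr[1]'?
// getType() answers by walking the redeclaration chain and returning the
// first type that is not an incomplete array, falling back to D->getType().
constexpr int *p = &arr[1]; // valid?
```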
@@ -77,18 +124,44 @@ QualType APValue::LValueBase::getDynamicAllocType() const {
return QualType::getFromOpaquePtr(DynamicAllocType);
}
+void APValue::LValueBase::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(Ptr.getOpaqueValue());
+ if (is<TypeInfoLValue>() || is<DynamicAllocLValue>())
+ return;
+ ID.AddInteger(Local.CallIndex);
+ ID.AddInteger(Local.Version);
+}
+
namespace clang {
bool operator==(const APValue::LValueBase &LHS,
const APValue::LValueBase &RHS) {
if (LHS.Ptr != RHS.Ptr)
return false;
- if (LHS.is<TypeInfoLValue>())
+ if (LHS.is<TypeInfoLValue>() || LHS.is<DynamicAllocLValue>())
return true;
return LHS.Local.CallIndex == RHS.Local.CallIndex &&
LHS.Local.Version == RHS.Local.Version;
}
}
+APValue::LValuePathEntry::LValuePathEntry(BaseOrMemberType BaseOrMember) {
+ if (const Decl *D = BaseOrMember.getPointer())
+ BaseOrMember.setPointer(D->getCanonicalDecl());
+ Value = reinterpret_cast<uintptr_t>(BaseOrMember.getOpaqueValue());
+}
+
+void APValue::LValuePathEntry::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Value);
+}
+
+APValue::LValuePathSerializationHelper::LValuePathSerializationHelper(
+ ArrayRef<LValuePathEntry> Path, QualType ElemTy)
+ : ElemTy((const void *)ElemTy.getTypePtrOrNull()), Path(Path) {}
+
+QualType APValue::LValuePathSerializationHelper::getType() {
+ return QualType::getFromOpaquePtr(ElemTy);
+}
+
namespace {
struct LVBase {
APValue::LValueBase Base;
@@ -113,14 +186,16 @@ APValue::LValueBase::operator bool () const {
clang::APValue::LValueBase
llvm::DenseMapInfo<clang::APValue::LValueBase>::getEmptyKey() {
- return clang::APValue::LValueBase(
- DenseMapInfo<const ValueDecl*>::getEmptyKey());
+ clang::APValue::LValueBase B;
+ B.Ptr = DenseMapInfo<const ValueDecl*>::getEmptyKey();
+ return B;
}
clang::APValue::LValueBase
llvm::DenseMapInfo<clang::APValue::LValueBase>::getTombstoneKey() {
- return clang::APValue::LValueBase(
- DenseMapInfo<const ValueDecl*>::getTombstoneKey());
+ clang::APValue::LValueBase B;
+ B.Ptr = DenseMapInfo<const ValueDecl*>::getTombstoneKey();
+ return B;
}
namespace clang {
@@ -254,7 +329,7 @@ APValue::APValue(const APValue &RHS) : Kind(None) {
}
case Vector:
MakeVector();
- setVector(((const Vec *)(const char *)RHS.Data.buffer)->Elts,
+ setVector(((const Vec *)(const char *)&RHS.Data)->Elts,
RHS.getVectorLength());
break;
case ComplexInt:
@@ -304,31 +379,50 @@ APValue::APValue(const APValue &RHS) : Kind(None) {
}
}
+APValue::APValue(APValue &&RHS) : Kind(RHS.Kind), Data(RHS.Data) {
+ RHS.Kind = None;
+}
+
+APValue &APValue::operator=(const APValue &RHS) {
+ if (this != &RHS)
+ *this = APValue(RHS);
+ return *this;
+}
+
+APValue &APValue::operator=(APValue &&RHS) {
+ if (Kind != None && Kind != Indeterminate)
+ DestroyDataAndMakeUninit();
+ Kind = RHS.Kind;
+ Data = RHS.Data;
+ RHS.Kind = None;
+ return *this;
+}
+
void APValue::DestroyDataAndMakeUninit() {
if (Kind == Int)
- ((APSInt*)(char*)Data.buffer)->~APSInt();
+ ((APSInt *)(char *)&Data)->~APSInt();
else if (Kind == Float)
- ((APFloat*)(char*)Data.buffer)->~APFloat();
+ ((APFloat *)(char *)&Data)->~APFloat();
else if (Kind == FixedPoint)
- ((APFixedPoint *)(char *)Data.buffer)->~APFixedPoint();
+ ((APFixedPoint *)(char *)&Data)->~APFixedPoint();
else if (Kind == Vector)
- ((Vec*)(char*)Data.buffer)->~Vec();
+ ((Vec *)(char *)&Data)->~Vec();
else if (Kind == ComplexInt)
- ((ComplexAPSInt*)(char*)Data.buffer)->~ComplexAPSInt();
+ ((ComplexAPSInt *)(char *)&Data)->~ComplexAPSInt();
else if (Kind == ComplexFloat)
- ((ComplexAPFloat*)(char*)Data.buffer)->~ComplexAPFloat();
+ ((ComplexAPFloat *)(char *)&Data)->~ComplexAPFloat();
else if (Kind == LValue)
- ((LV*)(char*)Data.buffer)->~LV();
+ ((LV *)(char *)&Data)->~LV();
else if (Kind == Array)
- ((Arr*)(char*)Data.buffer)->~Arr();
+ ((Arr *)(char *)&Data)->~Arr();
else if (Kind == Struct)
- ((StructData*)(char*)Data.buffer)->~StructData();
+ ((StructData *)(char *)&Data)->~StructData();
else if (Kind == Union)
- ((UnionData*)(char*)Data.buffer)->~UnionData();
+ ((UnionData *)(char *)&Data)->~UnionData();
else if (Kind == MemberPointer)
- ((MemberPointerData*)(char*)Data.buffer)->~MemberPointerData();
+ ((MemberPointerData *)(char *)&Data)->~MemberPointerData();
else if (Kind == AddrLabelDiff)
- ((AddrLabelDiffData*)(char*)Data.buffer)->~AddrLabelDiffData();
+ ((AddrLabelDiffData *)(char *)&Data)->~AddrLabelDiffData();
Kind = None;
}
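
The new move operations are cheap because APValue treats Data as raw storage owned by whichever Kind is live: moving copies the bytes and resets the source's Kind to None, so the source's destructor has nothing left to destroy. The same discriminated-storage pattern in miniature, on a toy type that is emphatically not clang's APValue (the string is moved field-wise here rather than memcpy'd, since only APValue's particular members tolerate a raw byte copy):

```cpp
#include <new>
#include <string>
#include <utility>

class Value {
  enum Kind { None, Str } K = None;
  alignas(std::string) unsigned char Data[sizeof(std::string)];

  std::string &str() { return *reinterpret_cast<std::string *>(Data); }

  void destroy() {
    if (K == Str)
      str().~basic_string(); // manual dtor call: Data is raw storage
    K = None;
  }

public:
  Value() = default;
  explicit Value(std::string S) : K(Str) { new (Data) std::string(std::move(S)); }
  Value(const Value &) = delete; // APValue's copy is deep; omitted here

  Value(Value &&RHS) : K(RHS.K) {
    if (K == Str)
      new (Data) std::string(std::move(RHS.str()));
    RHS.destroy(); // the source forgets its contents, like 'RHS.Kind = None'
  }

  Value &operator=(Value &&RHS) {
    if (this != &RHS) {
      destroy();
      if (RHS.K == Str)
        new (Data) std::string(std::move(RHS.str()));
      K = RHS.K;
      RHS.destroy();
    }
    return *this;
  }

  ~Value() { destroy(); }
};
```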
@@ -362,20 +456,165 @@ bool APValue::needsCleanup() const {
"same size.");
return getComplexIntReal().needsCleanup();
case LValue:
- return reinterpret_cast<const LV *>(Data.buffer)->hasPathPtr();
+ return reinterpret_cast<const LV *>(&Data)->hasPathPtr();
case MemberPointer:
- return reinterpret_cast<const MemberPointerData *>(Data.buffer)
- ->hasPathPtr();
+ return reinterpret_cast<const MemberPointerData *>(&Data)->hasPathPtr();
}
llvm_unreachable("Unknown APValue kind!");
}
void APValue::swap(APValue &RHS) {
std::swap(Kind, RHS.Kind);
- char TmpData[DataSize];
- memcpy(TmpData, Data.buffer, DataSize);
- memcpy(Data.buffer, RHS.Data.buffer, DataSize);
- memcpy(RHS.Data.buffer, TmpData, DataSize);
+ std::swap(Data, RHS.Data);
+}
+
+/// Profile the value of an APInt, excluding its bit-width.
+static void profileIntValue(llvm::FoldingSetNodeID &ID, const llvm::APInt &V) {
+ for (unsigned I = 0, N = V.getBitWidth(); I < N; I += 32)
+ ID.AddInteger((uint32_t)V.extractBitsAsZExtValue(std::min(32u, N - I), I));
+}
+
+void APValue::Profile(llvm::FoldingSetNodeID &ID) const {
+ // Note that our profiling assumes that only APValues of the same type are
+ // ever compared. As a result, we don't consider collisions that could only
+ // happen if the types are different. (For example, structs with different
+ // numbers of members could profile the same.)
+
+ ID.AddInteger(Kind);
+
+ switch (Kind) {
+ case None:
+ case Indeterminate:
+ return;
+
+ case AddrLabelDiff:
+ ID.AddPointer(getAddrLabelDiffLHS()->getLabel()->getCanonicalDecl());
+ ID.AddPointer(getAddrLabelDiffRHS()->getLabel()->getCanonicalDecl());
+ return;
+
+ case Struct:
+ for (unsigned I = 0, N = getStructNumBases(); I != N; ++I)
+ getStructBase(I).Profile(ID);
+ for (unsigned I = 0, N = getStructNumFields(); I != N; ++I)
+ getStructField(I).Profile(ID);
+ return;
+
+ case Union:
+ if (!getUnionField()) {
+ ID.AddInteger(0);
+ return;
+ }
+ ID.AddInteger(getUnionField()->getFieldIndex() + 1);
+ getUnionValue().Profile(ID);
+ return;
+
+ case Array: {
+ if (getArraySize() == 0)
+ return;
+
+ // The profile should not depend on whether the array is expanded or
+ // not, but we don't want to profile the array filler many times for
+ // a large array. So treat all equal trailing elements as the filler.
+ // Elements are profiled in reverse order to support this, and the
+ // first profiled element is followed by a count. For example:
+ //
+ // ['a', 'c', 'x', 'x', 'x'] is profiled as
+ // [5, 'x', 3, 'c', 'a']
+ llvm::FoldingSetNodeID FillerID;
+ (hasArrayFiller() ? getArrayFiller()
+ : getArrayInitializedElt(getArrayInitializedElts() - 1))
+ .Profile(FillerID);
+ ID.AddNodeID(FillerID);
+ unsigned NumFillers = getArraySize() - getArrayInitializedElts();
+ unsigned N = getArrayInitializedElts();
+
+ // Count the number of elements equal to the last one. This loop ends
+ // by adding an integer indicating the number of such elements, with
+ // N set to the number of elements left to profile.
+ while (true) {
+ if (N == 0) {
+ // All elements are fillers.
+ assert(NumFillers == getArraySize());
+ ID.AddInteger(NumFillers);
+ break;
+ }
+
+      // No need to compare the last element against itself: when
+      // N == getArraySize(), element N-1 is the one FillerID was built from.
+ if (N != getArraySize()) {
+ llvm::FoldingSetNodeID ElemID;
+ getArrayInitializedElt(N - 1).Profile(ElemID);
+ if (ElemID != FillerID) {
+ ID.AddInteger(NumFillers);
+ ID.AddNodeID(ElemID);
+ --N;
+ break;
+ }
+ }
+
+ // This is a filler.
+ ++NumFillers;
+ --N;
+ }
+
+ // Emit the remaining elements.
+ for (; N != 0; --N)
+ getArrayInitializedElt(N - 1).Profile(ID);
+ return;
+ }
+
+ case Vector:
+ for (unsigned I = 0, N = getVectorLength(); I != N; ++I)
+ getVectorElt(I).Profile(ID);
+ return;
+
+ case Int:
+ profileIntValue(ID, getInt());
+ return;
+
+ case Float:
+ profileIntValue(ID, getFloat().bitcastToAPInt());
+ return;
+
+ case FixedPoint:
+ profileIntValue(ID, getFixedPoint().getValue());
+ return;
+
+ case ComplexFloat:
+ profileIntValue(ID, getComplexFloatReal().bitcastToAPInt());
+ profileIntValue(ID, getComplexFloatImag().bitcastToAPInt());
+ return;
+
+ case ComplexInt:
+ profileIntValue(ID, getComplexIntReal());
+ profileIntValue(ID, getComplexIntImag());
+ return;
+
+ case LValue:
+ getLValueBase().Profile(ID);
+ ID.AddInteger(getLValueOffset().getQuantity());
+ ID.AddInteger((isNullPointer() ? 1 : 0) |
+ (isLValueOnePastTheEnd() ? 2 : 0) |
+ (hasLValuePath() ? 4 : 0));
+ if (hasLValuePath()) {
+ ID.AddInteger(getLValuePath().size());
+ // For uniqueness, we only need to profile the entries corresponding
+ // to union members, but we don't have the type here so we don't know
+ // how to interpret the entries.
+ for (LValuePathEntry E : getLValuePath())
+ E.Profile(ID);
+ }
+ return;
+
+ case MemberPointer:
+ ID.AddPointer(getMemberPointerDecl());
+ ID.AddInteger(isMemberPointerToDerivedMember());
+ for (const CXXRecordDecl *D : getMemberPointerPath())
+ ID.AddPointer(D);
+ return;
+ }
+
+ llvm_unreachable("Unknown APValue kind!");
}
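
Profile() exists so constant values can be uniqued structurally, for example when deduplicating template arguments of class type: two APValues that feed the same data stream into a FoldingSetNodeID compare equal. The mechanics in a standalone sketch; note the length prefix, added for the same reason APValue::Profile records the lvalue-path size:

```cpp
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"

// Two sequences profile equal iff they push identical data into the ID,
// so a length prefix keeps [1,2]+[3] from colliding with [1]+[2,3].
llvm::FoldingSetNodeID profileInts(llvm::ArrayRef<unsigned> Vals) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(Vals.size());
  for (unsigned V : Vals)
    ID.AddInteger(V);
  return ID;
}

bool sameProfile(llvm::ArrayRef<unsigned> A, llvm::ArrayRef<unsigned> B) {
  return profileInts(A) == profileInts(B); // FoldingSetNodeID has operator==
}
```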
static double GetApproxValue(const llvm::APFloat &F) {
@@ -388,6 +627,18 @@ static double GetApproxValue(const llvm::APFloat &F) {
void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
QualType Ty) const {
+ printPretty(Out, Ctx.getPrintingPolicy(), Ty, &Ctx);
+}
+
+void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
+ QualType Ty, const ASTContext *Ctx) const {
+ // There are no objects of type 'void', but values of this type can be
+ // returned from functions.
+ if (Ty->isVoidType()) {
+ Out << "void()";
+ return;
+ }
+
switch (getKind()) {
case APValue::None:
Out << "<out of lifetime>";
@@ -410,10 +661,10 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
case APValue::Vector: {
Out << '{';
QualType ElemTy = Ty->castAs<VectorType>()->getElementType();
- getVectorElt(0).printPretty(Out, Ctx, ElemTy);
+ getVectorElt(0).printPretty(Out, Policy, ElemTy, Ctx);
for (unsigned i = 1; i != getVectorLength(); ++i) {
Out << ", ";
- getVectorElt(i).printPretty(Out, Ctx, ElemTy);
+ getVectorElt(i).printPretty(Out, Policy, ElemTy, Ctx);
}
Out << '}';
return;
@@ -435,12 +686,12 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
LValueBase Base = getLValueBase();
if (!Base) {
if (isNullPointer()) {
- Out << (Ctx.getLangOpts().CPlusPlus11 ? "nullptr" : "0");
+ Out << (Policy.Nullptr ? "nullptr" : "0");
} else if (IsReference) {
- Out << "*(" << InnerTy.stream(Ctx.getPrintingPolicy()) << "*)"
+ Out << "*(" << InnerTy.stream(Policy) << "*)"
<< getLValueOffset().getQuantity();
} else {
- Out << "(" << Ty.stream(Ctx.getPrintingPolicy()) << ")"
+ Out << "(" << Ty.stream(Policy) << ")"
<< getLValueOffset().getQuantity();
}
return;
@@ -449,11 +700,11 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
if (!hasLValuePath()) {
// No lvalue path: just print the offset.
CharUnits O = getLValueOffset();
- CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
+ CharUnits S = Ctx ? Ctx->getTypeSizeInChars(InnerTy) : CharUnits::Zero();
if (!O.isZero()) {
if (IsReference)
Out << "*(";
- if (O % S) {
+ if (S.isZero() || O % S) {
Out << "(char*)";
S = CharUnits::One();
}
@@ -465,16 +716,15 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
Out << *VD;
else if (TypeInfoLValue TI = Base.dyn_cast<TypeInfoLValue>()) {
- TI.print(Out, Ctx.getPrintingPolicy());
+ TI.print(Out, Policy);
} else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
Out << "{*new "
- << Base.getDynamicAllocType().stream(Ctx.getPrintingPolicy()) << "#"
+ << Base.getDynamicAllocType().stream(Policy) << "#"
<< DA.getIndex() << "}";
} else {
assert(Base.get<const Expr *>() != nullptr &&
"Expecting non-null Expr");
- Base.get<const Expr*>()->printPretty(Out, nullptr,
- Ctx.getPrintingPolicy());
+ Base.get<const Expr*>()->printPretty(Out, nullptr, Policy);
}
if (!O.isZero()) {
@@ -491,37 +741,31 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
else if (isLValueOnePastTheEnd())
Out << "*(&";
- QualType ElemTy;
+ QualType ElemTy = Base.getType();
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
Out << *VD;
- ElemTy = VD->getType();
} else if (TypeInfoLValue TI = Base.dyn_cast<TypeInfoLValue>()) {
- TI.print(Out, Ctx.getPrintingPolicy());
- ElemTy = Base.getTypeInfoType();
+ TI.print(Out, Policy);
} else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
- Out << "{*new "
- << Base.getDynamicAllocType().stream(Ctx.getPrintingPolicy()) << "#"
+ Out << "{*new " << Base.getDynamicAllocType().stream(Policy) << "#"
<< DA.getIndex() << "}";
- ElemTy = Base.getDynamicAllocType();
} else {
const Expr *E = Base.get<const Expr*>();
assert(E != nullptr && "Expecting non-null Expr");
- E->printPretty(Out, nullptr, Ctx.getPrintingPolicy());
- // FIXME: This is wrong if E is a MaterializeTemporaryExpr with an lvalue
- // adjustment.
- ElemTy = E->getType();
+ E->printPretty(Out, nullptr, Policy);
}
ArrayRef<LValuePathEntry> Path = getLValuePath();
const CXXRecordDecl *CastToBase = nullptr;
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
- if (ElemTy->getAs<RecordType>()) {
+ if (ElemTy->isRecordType()) {
// The lvalue refers to a class type, so the next path entry is a base
// or member.
const Decl *BaseOrMember = Path[I].getAsBaseOrMember().getPointer();
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
CastToBase = RD;
- ElemTy = Ctx.getRecordType(RD);
+ // Leave ElemTy referring to the most-derived class. The actual type
+ // doesn't matter except for array types.
} else {
const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
Out << ".";
@@ -533,7 +777,7 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
} else {
// The lvalue must refer to an array.
Out << '[' << Path[I].getAsArrayIndex() << ']';
- ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
+ ElemTy = ElemTy->castAsArrayTypeUnsafe()->getElementType();
}
}
@@ -548,11 +792,11 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
return;
}
case APValue::Array: {
- const ArrayType *AT = Ctx.getAsArrayType(Ty);
+ const ArrayType *AT = Ty->castAsArrayTypeUnsafe();
QualType ElemTy = AT->getElementType();
Out << '{';
if (unsigned N = getArrayInitializedElts()) {
- getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
+ getArrayInitializedElt(0).printPretty(Out, Policy, ElemTy, Ctx);
for (unsigned I = 1; I != N; ++I) {
Out << ", ";
if (I == 10) {
@@ -560,7 +804,7 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
Out << "...";
break;
}
- getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
+ getArrayInitializedElt(I).printPretty(Out, Policy, ElemTy, Ctx);
}
}
Out << '}';
@@ -577,7 +821,7 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
assert(BI != CD->bases_end());
if (!First)
Out << ", ";
- getStructBase(I).printPretty(Out, Ctx, BI->getType());
+ getStructBase(I).printPretty(Out, Policy, BI->getType(), Ctx);
First = false;
}
}
@@ -586,7 +830,7 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
Out << ", ";
if (FI->isUnnamedBitfield()) continue;
getStructField(FI->getFieldIndex()).
- printPretty(Out, Ctx, FI->getType());
+ printPretty(Out, Policy, FI->getType(), Ctx);
First = false;
}
Out << '}';
@@ -596,7 +840,7 @@ void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
Out << '{';
if (const FieldDecl *FD = getUnionField()) {
Out << "." << *FD << " = ";
- getUnionValue().printPretty(Out, Ctx, FD->getType());
+ getUnionValue().printPretty(Out, Policy, FD->getType(), Ctx);
}
Out << '}';
return;
@@ -648,49 +892,49 @@ bool APValue::toIntegralConstant(APSInt &Result, QualType SrcTy,
const APValue::LValueBase APValue::getLValueBase() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data.buffer)->Base;
+ return ((const LV *)(const void *)&Data)->Base;
}
bool APValue::isLValueOnePastTheEnd() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data.buffer)->IsOnePastTheEnd;
+ return ((const LV *)(const void *)&Data)->IsOnePastTheEnd;
}
CharUnits &APValue::getLValueOffset() {
assert(isLValue() && "Invalid accessor");
- return ((LV*)(void*)Data.buffer)->Offset;
+ return ((LV *)(void *)&Data)->Offset;
}
bool APValue::hasLValuePath() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const char*)Data.buffer)->hasPath();
+ return ((const LV *)(const char *)&Data)->hasPath();
}
ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
assert(isLValue() && hasLValuePath() && "Invalid accessor");
- const LV &LVal = *((const LV*)(const char*)Data.buffer);
+ const LV &LVal = *((const LV *)(const char *)&Data);
return llvm::makeArrayRef(LVal.getPath(), LVal.PathLength);
}
unsigned APValue::getLValueCallIndex() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const char*)Data.buffer)->Base.getCallIndex();
+ return ((const LV *)(const char *)&Data)->Base.getCallIndex();
}
unsigned APValue::getLValueVersion() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const char*)Data.buffer)->Base.getVersion();
+ return ((const LV *)(const char *)&Data)->Base.getVersion();
}
bool APValue::isNullPointer() const {
assert(isLValue() && "Invalid usage");
- return ((const LV*)(const char*)Data.buffer)->IsNullPtr;
+ return ((const LV *)(const char *)&Data)->IsNullPtr;
}
void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
bool IsNullPtr) {
assert(isLValue() && "Invalid accessor");
- LV &LVal = *((LV*)(char*)Data.buffer);
+ LV &LVal = *((LV *)(char *)&Data);
LVal.Base = B;
LVal.IsOnePastTheEnd = false;
LVal.Offset = O;
@@ -698,60 +942,188 @@ void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
LVal.IsNullPtr = IsNullPtr;
}
-void APValue::setLValue(LValueBase B, const CharUnits &O,
- ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
- bool IsNullPtr) {
+MutableArrayRef<APValue::LValuePathEntry>
+APValue::setLValueUninit(LValueBase B, const CharUnits &O, unsigned Size,
+ bool IsOnePastTheEnd, bool IsNullPtr) {
assert(isLValue() && "Invalid accessor");
- LV &LVal = *((LV*)(char*)Data.buffer);
+ LV &LVal = *((LV *)(char *)&Data);
LVal.Base = B;
LVal.IsOnePastTheEnd = IsOnePastTheEnd;
LVal.Offset = O;
- LVal.resizePath(Path.size());
- memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
LVal.IsNullPtr = IsNullPtr;
+ LVal.resizePath(Size);
+ return {LVal.getPath(), Size};
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O,
+ ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
+ bool IsNullPtr) {
+ MutableArrayRef<APValue::LValuePathEntry> InternalPath =
+ setLValueUninit(B, O, Path.size(), IsOnePastTheEnd, IsNullPtr);
+ if (Path.size()) {
+ memcpy(InternalPath.data(), Path.data(),
+ Path.size() * sizeof(LValuePathEntry));
+ }
+}
+
+void APValue::setUnion(const FieldDecl *Field, const APValue &Value) {
+ assert(isUnion() && "Invalid accessor");
+ ((UnionData *)(char *)&Data)->Field =
+ Field ? Field->getCanonicalDecl() : nullptr;
+ *((UnionData *)(char *)&Data)->Value = Value;
}
const ValueDecl *APValue::getMemberPointerDecl() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
- *((const MemberPointerData *)(const char *)Data.buffer);
+ *((const MemberPointerData *)(const char *)&Data);
return MPD.MemberAndIsDerivedMember.getPointer();
}
bool APValue::isMemberPointerToDerivedMember() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
- *((const MemberPointerData *)(const char *)Data.buffer);
+ *((const MemberPointerData *)(const char *)&Data);
return MPD.MemberAndIsDerivedMember.getInt();
}
ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
- *((const MemberPointerData *)(const char *)Data.buffer);
+ *((const MemberPointerData *)(const char *)&Data);
return llvm::makeArrayRef(MPD.getPath(), MPD.PathLength);
}
void APValue::MakeLValue() {
assert(isAbsent() && "Bad state change");
static_assert(sizeof(LV) <= DataSize, "LV too big");
- new ((void*)(char*)Data.buffer) LV();
+ new ((void *)(char *)&Data) LV();
Kind = LValue;
}
void APValue::MakeArray(unsigned InitElts, unsigned Size) {
assert(isAbsent() && "Bad state change");
- new ((void*)(char*)Data.buffer) Arr(InitElts, Size);
+ new ((void *)(char *)&Data) Arr(InitElts, Size);
Kind = Array;
}
-void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
- ArrayRef<const CXXRecordDecl*> Path) {
+MutableArrayRef<APValue::LValuePathEntry>
+setLValueUninit(APValue::LValueBase B, const CharUnits &O, unsigned Size,
+ bool OnePastTheEnd, bool IsNullPtr);
+
+MutableArrayRef<const CXXRecordDecl *>
+APValue::setMemberPointerUninit(const ValueDecl *Member, bool IsDerivedMember,
+ unsigned Size) {
assert(isAbsent() && "Bad state change");
- MemberPointerData *MPD = new ((void*)(char*)Data.buffer) MemberPointerData;
+ MemberPointerData *MPD = new ((void *)(char *)&Data) MemberPointerData;
Kind = MemberPointer;
- MPD->MemberAndIsDerivedMember.setPointer(Member);
+ MPD->MemberAndIsDerivedMember.setPointer(
+ Member ? cast<ValueDecl>(Member->getCanonicalDecl()) : nullptr);
MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
- MPD->resizePath(Path.size());
- memcpy(MPD->getPath(), Path.data(), Path.size()*sizeof(const CXXRecordDecl*));
+ MPD->resizePath(Size);
+ return {MPD->getPath(), MPD->PathLength};
+}
+
+void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl *> Path) {
+ MutableArrayRef<const CXXRecordDecl *> InternalPath =
+ setMemberPointerUninit(Member, IsDerivedMember, Path.size());
+ for (unsigned I = 0; I != Path.size(); ++I)
+ InternalPath[I] = Path[I]->getCanonicalDecl();
+}
+
+LinkageInfo LinkageComputer::getLVForValue(const APValue &V,
+ LVComputationKind computation) {
+ LinkageInfo LV = LinkageInfo::external();
+
+ auto MergeLV = [&](LinkageInfo MergeLV) {
+ LV.merge(MergeLV);
+ return LV.getLinkage() == InternalLinkage;
+ };
+ auto Merge = [&](const APValue &V) {
+ return MergeLV(getLVForValue(V, computation));
+ };
+
+ switch (V.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ case APValue::Int:
+ case APValue::Float:
+ case APValue::FixedPoint:
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ case APValue::Vector:
+ break;
+
+ case APValue::AddrLabelDiff:
+ // Even for an inline function, it's not reasonable to treat a difference
+ // between the addresses of labels as an external value.
+ return LinkageInfo::internal();
+
+ case APValue::Struct: {
+ for (unsigned I = 0, N = V.getStructNumBases(); I != N; ++I)
+ if (Merge(V.getStructBase(I)))
+ break;
+ for (unsigned I = 0, N = V.getStructNumFields(); I != N; ++I)
+ if (Merge(V.getStructField(I)))
+ break;
+ break;
+ }
+
+ case APValue::Union:
+ if (V.getUnionField())
+ Merge(V.getUnionValue());
+ break;
+
+ case APValue::Array: {
+ for (unsigned I = 0, N = V.getArrayInitializedElts(); I != N; ++I)
+ if (Merge(V.getArrayInitializedElt(I)))
+ break;
+ if (V.hasArrayFiller())
+ Merge(V.getArrayFiller());
+ break;
+ }
+
+ case APValue::LValue: {
+ if (!V.getLValueBase()) {
+ // Null or absolute address: this is external.
+ } else if (const auto *VD =
+ V.getLValueBase().dyn_cast<const ValueDecl *>()) {
+ if (VD && MergeLV(getLVForDecl(VD, computation)))
+ break;
+ } else if (const auto TI = V.getLValueBase().dyn_cast<TypeInfoLValue>()) {
+ if (MergeLV(getLVForType(*TI.getType(), computation)))
+ break;
+ } else if (const Expr *E = V.getLValueBase().dyn_cast<const Expr *>()) {
+ // Almost all expression bases are internal. The exception is
+ // lifetime-extended temporaries.
+ // FIXME: These should be modeled as having the
+ // LifetimeExtendedTemporaryDecl itself as the base.
+ // FIXME: If we permit Objective-C object literals in template arguments,
+ // they should not imply internal linkage.
+ auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
+ if (!MTE || MTE->getStorageDuration() == SD_FullExpression)
+ return LinkageInfo::internal();
+ if (MergeLV(getLVForDecl(MTE->getExtendingDecl(), computation)))
+ break;
+ } else {
+ assert(V.getLValueBase().is<DynamicAllocLValue>() &&
+ "unexpected LValueBase kind");
+ return LinkageInfo::internal();
+ }
+ // The lvalue path doesn't matter: pointers to all subobjects always have
+ // the same visibility as pointers to the complete object.
+ break;
+ }
+
+ case APValue::MemberPointer:
+ if (const NamedDecl *D = V.getMemberPointerDecl())
+ MergeLV(getLVForDecl(D, computation));
+ // Note that we could have a base-to-derived conversion here to a member of
+ // a derived class with less linkage/visibility. That's covered by the
+ // linkage and visibility of the value's type.
+ break;
+ }
+
+ return LV;
}
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 2ba643f12a82..cb7f00abf9e9 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -51,7 +51,6 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
-#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
@@ -65,6 +64,7 @@
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
+#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
@@ -753,10 +753,10 @@ canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());
if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
- NewIDC = new (C) CXXFoldExpr(OrigFold->getType(), SourceLocation(), NewIDC,
- BinaryOperatorKind::BO_LAnd,
- SourceLocation(), /*RHS=*/nullptr,
- SourceLocation(), /*NumExpansions=*/None);
+ NewIDC = new (C) CXXFoldExpr(
+ OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
+ BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
+ SourceLocation(), /*NumExpansions=*/None);
return NewIDC;
}
@@ -883,10 +883,10 @@ CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
if (!LangOpts.CPlusPlus) return nullptr;
switch (T.getCXXABI().getKind()) {
+ case TargetCXXABI::AppleARM64:
case TargetCXXABI::Fuchsia:
case TargetCXXABI::GenericARM: // Same as Itanium at this level
case TargetCXXABI::iOS:
- case TargetCXXABI::iOS64:
case TargetCXXABI::WatchOS:
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericMIPS:
@@ -919,18 +919,20 @@ static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
// The fake address space map must have a distinct entry for each
// language-specific address space.
static const unsigned FakeAddrSpaceMap[] = {
- 0, // Default
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 0, // opencl_private
- 4, // opencl_generic
- 5, // cuda_device
- 6, // cuda_constant
- 7, // cuda_shared
- 8, // ptr32_sptr
- 9, // ptr32_uptr
- 10 // ptr64
+ 0, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 0, // opencl_private
+ 4, // opencl_generic
+ 5, // opencl_global_device
+ 6, // opencl_global_host
+ 7, // cuda_device
+ 8, // cuda_constant
+ 9, // cuda_shared
+ 10, // ptr32_sptr
+ 11, // ptr32_uptr
+ 12 // ptr64
};
return &FakeAddrSpaceMap;
} else {
@@ -963,6 +965,7 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
LangOpts.XRayNeverInstrumentFiles,
LangOpts.XRayAttrListFiles, SM)),
+ ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
@@ -1003,9 +1006,6 @@ ASTContext::~ASTContext() {
for (const auto &Value : ModuleInitializers)
Value.second->~PerModuleInitializers();
-
- for (APValue *Value : APValueCleanups)
- Value->~APValue();
}
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
@@ -1424,6 +1424,18 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
#include "clang/Basic/AArch64SVEACLETypes.def"
}
+ if (Target.getTriple().isPPC64() &&
+ Target.hasFeature("paired-vector-memops")) {
+ if (Target.hasFeature("mma")) {
+#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
+ InitBuiltinType(Id##Ty, BuiltinType::Id);
+#include "clang/Basic/PPCTypes.def"
+ }
+#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
+ InitBuiltinType(Id##Ty, BuiltinType::Id);
+#include "clang/Basic/PPCTypes.def"
+ }
+
// Builtin type for __objc_yes and __objc_no
ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
SignedCharTy : BoolTy);
@@ -1775,9 +1787,8 @@ CharUnits ASTContext::getExnObjectAlignment() const {
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
-std::pair<CharUnits, CharUnits>
-ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
- std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
+TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
+ TypeInfoChars Info = getTypeInfoInChars(T);
// In C++, objects can sometimes be allocated into the tail padding
// of a base-class subobject. We decide whether that's possible
@@ -1785,44 +1796,43 @@ ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
if (getLangOpts().CPlusPlus) {
if (const auto *RT = T->getAs<RecordType>()) {
const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
- sizeAndAlign.first = layout.getDataSize();
+ Info.Width = layout.getDataSize();
}
}
- return sizeAndAlign;
+ return Info;
}
/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
-std::pair<CharUnits, CharUnits>
+TypeInfoChars
static getConstantArrayInfoInChars(const ASTContext &Context,
const ConstantArrayType *CAT) {
- std::pair<CharUnits, CharUnits> EltInfo =
- Context.getTypeInfoInChars(CAT->getElementType());
+ TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
uint64_t Size = CAT->getSize().getZExtValue();
- assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
+ assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
(uint64_t)(-1)/Size) &&
"Overflow in array type char size evaluation");
- uint64_t Width = EltInfo.first.getQuantity() * Size;
- unsigned Align = EltInfo.second.getQuantity();
+ uint64_t Width = EltInfo.Width.getQuantity() * Size;
+ unsigned Align = EltInfo.Align.getQuantity();
if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
Context.getTargetInfo().getPointerWidth(0) == 64)
Width = llvm::alignTo(Width, Align);
- return std::make_pair(CharUnits::fromQuantity(Width),
- CharUnits::fromQuantity(Align));
+ return TypeInfoChars(CharUnits::fromQuantity(Width),
+ CharUnits::fromQuantity(Align),
+ EltInfo.AlignIsRequired);
}
-std::pair<CharUnits, CharUnits>
-ASTContext::getTypeInfoInChars(const Type *T) const {
+TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
return getConstantArrayInfoInChars(*this, CAT);
TypeInfo Info = getTypeInfo(T);
- return std::make_pair(toCharUnitsFromBits(Info.Width),
- toCharUnitsFromBits(Info.Align));
+ return TypeInfoChars(toCharUnitsFromBits(Info.Width),
+ toCharUnitsFromBits(Info.Align),
+ Info.AlignIsRequired);
}
-std::pair<CharUnits, CharUnits>
-ASTContext::getTypeInfoInChars(QualType T) const {
+TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
return getTypeInfoInChars(T.getTypePtr());
}
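
The `std::pair<CharUnits, CharUnits>` to TypeInfoChars migration is mostly about readability (Width/Align instead of first/second), but it also lets the AlignIsRequired flag travel with the size and alignment, which the preferred-alignment logic later in this patch consumes. A stand-in struct that mirrors, rather than reproduces, the three fields these hunks use:

```cpp
#include <cstdint>

// Illustrative only: clang's TypeInfoChars stores CharUnits, not integers.
struct TypeInfoCharsSketch {
  int64_t Width = 0;            // size in chars
  int64_t Align = 1;            // alignment in chars
  bool AlignIsRequired = false; // set by an explicit alignment attribute
};

// Callers read named fields instead of .first/.second; compare the
// getConstantArrayInfoInChars hunk, which rounds Width up to Align.
int64_t arrayWidthInChars(const TypeInfoCharsSketch &Elt, uint64_t N) {
  int64_t W = Elt.Width * static_cast<int64_t>(N);
  return (W + Elt.Align - 1) / Elt.Align * Elt.Align;
}
```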
@@ -1834,7 +1844,8 @@ bool ASTContext::isAlignmentRequired(QualType T) const {
return isAlignmentRequired(T.getTypePtr());
}
-unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
+unsigned ASTContext::getTypeAlignIfKnown(QualType T,
+ bool NeedsPreferredAlignment) const {
// An alignment on a typedef overrides anything else.
if (const auto *TT = T->getAs<TypedefType>())
if (unsigned Align = TT->getDecl()->getMaxAlignment())
@@ -1843,7 +1854,7 @@ unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
// If we have an (array of) complete type, we're done.
T = getBaseElementType(T);
if (!T->isIncompleteType())
- return getTypeAlign(T);
+ return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
// If we had an array type, its element type might be a typedef
// type with an alignment attribute.
@@ -1935,6 +1946,13 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
if (TargetVectorAlign && TargetVectorAlign < Align)
Align = TargetVectorAlign;
+ if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ // Adjust the alignment for fixed-length SVE vectors. This is important
+ // for non-power-of-2 vector lengths.
+ Align = 128;
+ else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ // Adjust the alignment for fixed-length SVE predicates.
+ Align = 16;
break;
}
@@ -2143,6 +2161,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = 16; \
break;
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id: \
+ Width = Size; \
+ Align = Size; \
+ break;
+#include "clang/Basic/PPCTypes.def"
}
break;
case Type::ObjCObjectPointer:
@@ -2343,12 +2367,6 @@ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
- // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
- if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
- getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
- getTargetInfo().getABI() == "elfv1-qpx" &&
- T->isSpecificBuiltinType(BuiltinType::Double))
- SimdAlign = 256;
return SimdAlign;
}
@@ -2365,10 +2383,10 @@ int64_t ASTContext::toBits(CharUnits CharSize) const {
/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
- return getTypeInfoInChars(T).first;
+ return getTypeInfoInChars(T).Width;
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
- return getTypeInfoInChars(T).first;
+ return getTypeInfoInChars(T).Width;
}
/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
@@ -2392,8 +2410,9 @@ CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
-/// alignment in cases where it is beneficial for performance to overalign
-/// a data type.
+/// alignment in cases where it is beneficial for performance or backwards
+/// compatibility to overalign a data type. (Note: despite the name, the
+/// preferred alignment is ABI-impacting, not merely an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
TypeInfo TI = getTypeInfo(T);
unsigned ABIAlign = TI.Align;
@@ -2403,18 +2422,33 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
// The preferred alignment of member pointers is that of a pointer.
if (T->isMemberPointerType())
return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
-
+
if (!Target->allowsLargerPreferedTypeAlignment())
return ABIAlign;
- // Double and long long should be naturally aligned if possible.
+ if (const auto *RT = T->getAs<RecordType>()) {
+ if (TI.AlignIsRequired || RT->getDecl()->isInvalidDecl())
+ return ABIAlign;
+
+ unsigned PreferredAlign = static_cast<unsigned>(
+ toBits(getASTRecordLayout(RT->getDecl()).PreferredAlignment));
+ assert(PreferredAlign >= ABIAlign &&
+ "PreferredAlign should be at least as large as ABIAlign.");
+ return PreferredAlign;
+ }
+
+ // Double (and, for targets supporting AIX `power` alignment, long double) and
+ // long long should be naturally aligned (despite requiring less alignment) if
+ // possible.
if (const auto *CT = T->getAs<ComplexType>())
T = CT->getElementType().getTypePtr();
if (const auto *ET = T->getAs<EnumType>())
T = ET->getDecl()->getIntegerType().getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Double) ||
T->isSpecificBuiltinType(BuiltinType::LongLong) ||
- T->isSpecificBuiltinType(BuiltinType::ULongLong))
+ T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+ (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
+ Target->defaultsToAIXPowerAlignment()))
// Don't increase the alignment if an alignment attribute was specified on a
// typedef declaration.
if (!TI.AlignIsRequired)
@@ -2434,7 +2468,8 @@ unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
/// to a global variable of the specified type.
unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
uint64_t TypeSize = getTypeSize(T.getTypePtr());
- return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign(TypeSize));
+ return std::max(getPreferredTypeAlign(T),
+ getTargetInfo().getMinGlobalAlign(TypeSize));
}
/// getAlignOfGlobalVarInChars - Return the alignment in characters that
@@ -2453,6 +2488,25 @@ CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
return Offset;
}
+CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ CharUnits ThisAdjustment = CharUnits::Zero();
+ ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = RD;
+ const CXXRecordDecl *Derived = Path[I];
+ if (DerivedMember)
+ std::swap(Base, Derived);
+ ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
+ RD = Path[I];
+ }
+ if (DerivedMember)
+ ThisAdjustment = -ThisAdjustment;
+ return ThisAdjustment;
+}
+
/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
@@ -2870,14 +2924,27 @@ QualType ASTContext::getAddrSpaceQualType(QualType T,
}
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
+ // If the type is not qualified with an address space, just return it
+ // immediately.
+ if (!T.hasAddressSpace())
+ return T;
+
// If we are composing extended qualifiers together, merge together
// into one ExtQuals node.
QualifierCollector Quals;
- const Type *TypeNode = Quals.strip(T);
+ const Type *TypeNode;
- // If the qualifier doesn't have an address space just return it.
- if (!Quals.hasAddressSpace())
- return T;
+ while (T.hasAddressSpace()) {
+ TypeNode = Quals.strip(T);
+
+ // If the type no longer has an address space after stripping qualifiers,
+ // jump out.
+ if (!QualType(TypeNode, 0).hasAddressSpace())
+ break;
+
+ // There might be sugar in the way. Strip it and try again.
+ T = T.getSingleStepDesugaredType(*this);
+ }
Quals.removeAddressSpace();
@@ -3634,6 +3701,119 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
return QualType(newType, 0);
}
+ASTContext::BuiltinVectorTypeInfo
+ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
+#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
+ {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
+ NUMVECTORS};
+
+#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
+ {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};
+
+ switch (Ty->getKind()) {
+ default:
+ llvm_unreachable("Unsupported builtin vector type");
+ case BuiltinType::SveInt8:
+ return SVE_INT_ELTTY(8, 16, true, 1);
+ case BuiltinType::SveUint8:
+ return SVE_INT_ELTTY(8, 16, false, 1);
+ case BuiltinType::SveInt8x2:
+ return SVE_INT_ELTTY(8, 16, true, 2);
+ case BuiltinType::SveUint8x2:
+ return SVE_INT_ELTTY(8, 16, false, 2);
+ case BuiltinType::SveInt8x3:
+ return SVE_INT_ELTTY(8, 16, true, 3);
+ case BuiltinType::SveUint8x3:
+ return SVE_INT_ELTTY(8, 16, false, 3);
+ case BuiltinType::SveInt8x4:
+ return SVE_INT_ELTTY(8, 16, true, 4);
+ case BuiltinType::SveUint8x4:
+ return SVE_INT_ELTTY(8, 16, false, 4);
+ case BuiltinType::SveInt16:
+ return SVE_INT_ELTTY(16, 8, true, 1);
+ case BuiltinType::SveUint16:
+ return SVE_INT_ELTTY(16, 8, false, 1);
+ case BuiltinType::SveInt16x2:
+ return SVE_INT_ELTTY(16, 8, true, 2);
+ case BuiltinType::SveUint16x2:
+ return SVE_INT_ELTTY(16, 8, false, 2);
+ case BuiltinType::SveInt16x3:
+ return SVE_INT_ELTTY(16, 8, true, 3);
+ case BuiltinType::SveUint16x3:
+ return SVE_INT_ELTTY(16, 8, false, 3);
+ case BuiltinType::SveInt16x4:
+ return SVE_INT_ELTTY(16, 8, true, 4);
+ case BuiltinType::SveUint16x4:
+ return SVE_INT_ELTTY(16, 8, false, 4);
+ case BuiltinType::SveInt32:
+ return SVE_INT_ELTTY(32, 4, true, 1);
+ case BuiltinType::SveUint32:
+ return SVE_INT_ELTTY(32, 4, false, 1);
+ case BuiltinType::SveInt32x2:
+ return SVE_INT_ELTTY(32, 4, true, 2);
+ case BuiltinType::SveUint32x2:
+ return SVE_INT_ELTTY(32, 4, false, 2);
+ case BuiltinType::SveInt32x3:
+ return SVE_INT_ELTTY(32, 4, true, 3);
+ case BuiltinType::SveUint32x3:
+ return SVE_INT_ELTTY(32, 4, false, 3);
+ case BuiltinType::SveInt32x4:
+ return SVE_INT_ELTTY(32, 4, true, 4);
+ case BuiltinType::SveUint32x4:
+ return SVE_INT_ELTTY(32, 4, false, 4);
+ case BuiltinType::SveInt64:
+ return SVE_INT_ELTTY(64, 2, true, 1);
+ case BuiltinType::SveUint64:
+ return SVE_INT_ELTTY(64, 2, false, 1);
+ case BuiltinType::SveInt64x2:
+ return SVE_INT_ELTTY(64, 2, true, 2);
+ case BuiltinType::SveUint64x2:
+ return SVE_INT_ELTTY(64, 2, false, 2);
+ case BuiltinType::SveInt64x3:
+ return SVE_INT_ELTTY(64, 2, true, 3);
+ case BuiltinType::SveUint64x3:
+ return SVE_INT_ELTTY(64, 2, false, 3);
+ case BuiltinType::SveInt64x4:
+ return SVE_INT_ELTTY(64, 2, true, 4);
+ case BuiltinType::SveUint64x4:
+ return SVE_INT_ELTTY(64, 2, false, 4);
+ case BuiltinType::SveBool:
+ return SVE_ELTTY(BoolTy, 16, 1);
+ case BuiltinType::SveFloat16:
+ return SVE_ELTTY(HalfTy, 8, 1);
+ case BuiltinType::SveFloat16x2:
+ return SVE_ELTTY(HalfTy, 8, 2);
+ case BuiltinType::SveFloat16x3:
+ return SVE_ELTTY(HalfTy, 8, 3);
+ case BuiltinType::SveFloat16x4:
+ return SVE_ELTTY(HalfTy, 8, 4);
+ case BuiltinType::SveFloat32:
+ return SVE_ELTTY(FloatTy, 4, 1);
+ case BuiltinType::SveFloat32x2:
+ return SVE_ELTTY(FloatTy, 4, 2);
+ case BuiltinType::SveFloat32x3:
+ return SVE_ELTTY(FloatTy, 4, 3);
+ case BuiltinType::SveFloat32x4:
+ return SVE_ELTTY(FloatTy, 4, 4);
+ case BuiltinType::SveFloat64:
+ return SVE_ELTTY(DoubleTy, 2, 1);
+ case BuiltinType::SveFloat64x2:
+ return SVE_ELTTY(DoubleTy, 2, 2);
+ case BuiltinType::SveFloat64x3:
+ return SVE_ELTTY(DoubleTy, 2, 3);
+ case BuiltinType::SveFloat64x4:
+ return SVE_ELTTY(DoubleTy, 2, 4);
+ case BuiltinType::SveBFloat16:
+ return SVE_ELTTY(BFloat16Ty, 8, 1);
+ case BuiltinType::SveBFloat16x2:
+ return SVE_ELTTY(BFloat16Ty, 8, 2);
+ case BuiltinType::SveBFloat16x3:
+ return SVE_ELTTY(BFloat16Ty, 8, 3);
+ case BuiltinType::SveBFloat16x4:
+ return SVE_ELTTY(BFloat16Ty, 8, 4);
+ }
+}
+
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
@@ -4276,15 +4456,15 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
-QualType
-ASTContext::getTypedefType(const TypedefNameDecl *Decl,
- QualType Canonical) const {
+QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
+ QualType Underlying) const {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
- if (Canonical.isNull())
- Canonical = getCanonicalType(Decl->getUnderlyingType());
+ if (Underlying.isNull())
+ Underlying = Decl->getUnderlyingType();
+ QualType Canonical = getCanonicalType(Underlying);
auto *newType = new (*this, TypeAlignment)
- TypedefType(Type::Typedef, Decl, Canonical);
+ TypedefType(Type::Typedef, Decl, Underlying, Canonical);
Decl->TypeForDecl = newType;
Types.push_back(newType);
return QualType(newType, 0);
@@ -4722,9 +4902,16 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
Arg = TemplateArgument(ArgType);
} else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ QualType T =
+ NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
+ // For class NTTPs, ensure we include the 'const' so the type matches that
+ // of a real template argument.
+ // FIXME: It would be more faithful to model this as something like an
+ // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
+ if (T->isRecordType())
+ T.addConst();
Expr *E = new (*this) DeclRefExpr(
- *this, NTTP, /*enclosing*/ false,
- NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this),
+ *this, NTTP, /*enclosing*/ false, T,
Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
if (NTTP->isParameterPack())
@@ -4755,37 +4942,27 @@ ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
}
QualType ASTContext::getPackExpansionType(QualType Pattern,
- Optional<unsigned> NumExpansions) {
+ Optional<unsigned> NumExpansions,
+ bool ExpectPackInType) {
+ assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
+ "Pack expansions must expand one or more parameter packs");
+
llvm::FoldingSetNodeID ID;
PackExpansionType::Profile(ID, Pattern, NumExpansions);
- // A deduced type can deduce to a pack, eg
- // auto ...x = some_pack;
- // That declaration isn't (yet) valid, but is created as part of building an
- // init-capture pack:
- // [...x = some_pack] {}
- assert((Pattern->containsUnexpandedParameterPack() ||
- Pattern->getContainedDeducedType()) &&
- "Pack expansions must expand one or more parameter packs");
void *InsertPos = nullptr;
- PackExpansionType *T
- = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
if (T)
return QualType(T, 0);
QualType Canon;
if (!Pattern.isCanonical()) {
- Canon = getCanonicalType(Pattern);
- // The canonical type might not contain an unexpanded parameter pack, if it
- // contains an alias template specialization which ignores one of its
- // parameters.
- if (Canon->containsUnexpandedParameterPack()) {
- Canon = getPackExpansionType(Canon, NumExpansions);
+ Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
+ /*ExpectPackInType=*/false);
- // Find the insert position again, in case we inserted an element into
- // PackExpansionTypes and invalidated our insert position.
- PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
- }
+ // Find the insert position again, in case we inserted an element into
+ // PackExpansionTypes and invalidated our insert position.
+ PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
}
T = new (*this, TypeAlignment)
@@ -7062,6 +7239,9 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
case BuiltinType::OCLReserveID:
case BuiltinType::OCLSampler:
case BuiltinType::Dependent:
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
case BuiltinType::KIND:
@@ -7498,7 +7678,9 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
}
unsigned i = 0;
- for (auto *Field : RDecl->fields()) {
+ for (FieldDecl *Field : RDecl->fields()) {
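+ // Skip fields of zero size, with the exception of zero-length bit-fields.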
+ if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this))
+ continue;
uint64_t offs = layout.getFieldOffset(i);
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
std::make_pair(offs, Field));
@@ -8354,12 +8536,90 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
First->getVectorKind() != VectorType::AltiVecPixel &&
First->getVectorKind() != VectorType::AltiVecBool &&
Second->getVectorKind() != VectorType::AltiVecPixel &&
- Second->getVectorKind() != VectorType::AltiVecBool)
+ Second->getVectorKind() != VectorType::AltiVecBool &&
+ First->getVectorKind() != VectorType::SveFixedLengthDataVector &&
+ First->getVectorKind() != VectorType::SveFixedLengthPredicateVector &&
+ Second->getVectorKind() != VectorType::SveFixedLengthDataVector &&
+ Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector)
return true;
return false;
}
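+// Strict compatibility between a sizeless SVE builtin type and a fixed-length
+// vector type. For example, assuming -msve-vector-bits=512, 'svint32_t' is
+// compatible with a 512-bit fixed-length vector of 'int32_t', whereas a
+// fixed-length predicate vector is only ever compatible with 'svbool_t'.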
+bool ASTContext::areCompatibleSveTypes(QualType FirstType,
+ QualType SecondType) {
+ assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
+ "Expected SVE builtin type and vector type!");
+
+ auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
+ if (const auto *BT = FirstType->getAs<BuiltinType>()) {
+ if (const auto *VT = SecondType->getAs<VectorType>()) {
+ // Predicates have the same representation as uint8 so we also have to
+ // check the kind to make these types incompatible.
+ if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ return BT->getKind() == BuiltinType::SveBool;
+ else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ return VT->getElementType().getCanonicalType() ==
+ FirstType->getSveEltType(*this);
+ else if (VT->getVectorKind() == VectorType::GenericVector)
+ return getTypeSize(SecondType) == getLangOpts().ArmSveVectorBits &&
+ hasSameType(VT->getElementType(),
+ getBuiltinVectorTypeInfo(BT).ElementType);
+ }
+ }
+ return false;
+ };
+
+ return IsValidCast(FirstType, SecondType) ||
+ IsValidCast(SecondType, FirstType);
+}
+
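+// Lax (bitcast-style) compatibility between sizeless SVE builtin types and
+// GNU vectors, gated on -flax-vector-conversions. A hypothetical example,
+// assuming __ARM_FEATURE_SVE_BITS == 512 and -flax-vector-conversions=all:
+// typedef int32_t v16i32 __attribute__((vector_size(64)));
+// svint32_t S; v16i32 G = S; // accepted as a lax conversion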
+bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
+ QualType SecondType) {
+ assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
+ "Expected SVE builtin type and vector type!");
+
+ auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
+ if (!FirstType->getAs<BuiltinType>())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ if (VecTy &&
+ (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ VecTy->getVectorKind() == VectorType::GenericVector)) {
+ const LangOptions::LaxVectorConversionKind LVCKind =
+ getLangOpts().getLaxVectorConversions();
+
+ // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
+ // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
+ // converts to VLAT and VLAT implicitly converts to GNUT."
+ // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
+ // predicates.
+ if (VecTy->getVectorKind() == VectorType::GenericVector &&
+ getTypeSize(SecondType) != getLangOpts().ArmSveVectorBits)
+ return false;
+
+ // If -flax-vector-conversions=all is specified, the types are
+ // certainly compatible.
+ if (LVCKind == LangOptions::LaxVectorConversionKind::All)
+ return true;
+
+ // If -flax-vector-conversions=integer is specified, the types are
+ // compatible if the elements are integer types.
+ if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
+ return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
+ FirstType->getSveEltType(*this)->isIntegerType();
+ }
+
+ return false;
+ };
+
+ return IsLaxCompatible(FirstType, SecondType) ||
+ IsLaxCompatible(SecondType, FirstType);
+}
+
bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
while (true) {
// __strong id
@@ -9255,8 +9515,8 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
// designates the object or function denoted by the reference, and the
// expression is an lvalue unless the reference is an rvalue reference and
// the expression is a function call (possibly inside parentheses).
- assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?");
- assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?");
+ if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>())
+ return {};
if (Unqualified) {
LHS = LHS.getUnqualifiedType();
@@ -9471,17 +9731,15 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
const ConstantArrayType* CAT)
-> std::pair<bool,llvm::APInt> {
if (VAT) {
- llvm::APSInt TheInt;
+ Optional<llvm::APSInt> TheInt;
Expr *E = VAT->getSizeExpr();
- if (E && E->isIntegerConstantExpr(TheInt, *this))
- return std::make_pair(true, TheInt);
- else
- return std::make_pair(false, TheInt);
- } else if (CAT) {
- return std::make_pair(true, CAT->getSize());
- } else {
- return std::make_pair(false, llvm::APInt());
+ if (E && (TheInt = E->getIntegerConstantExpr(*this)))
+ return std::make_pair(true, *TheInt);
+ return std::make_pair(false, llvm::APSInt());
}
+ if (CAT)
+ return std::make_pair(true, CAT->getSize());
+ return std::make_pair(false, llvm::APInt());
};
bool HaveLSize, HaveRSize;
@@ -9760,6 +10018,11 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
return UnsignedLongLongTy;
case BuiltinType::Int128:
return UnsignedInt128Ty;
+ // wchar_t is special. It is either signed or unsigned, but when it's
+ // signed there is no matching "unsigned wchar_t". Therefore we return the
+ // unsigned version of its underlying type instead.
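+ // For example, when wchar_t is 'int' this returns 'unsigned int'.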
+ case BuiltinType::WChar_S:
+ return getUnsignedWCharType();
case BuiltinType::ShortAccum:
return UnsignedShortAccumTy;
@@ -10136,6 +10399,18 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
return Type;
}
+// On some targets such as PowerPC, some of the builtins are defined with custom
+// type descriptors for target-dependent types. These descriptors are decoded in
+// other functions, but it may be useful to be able to fall back to default
+// descriptor decoding to define builtins mixing target-dependent and target-
+// independent types. This function allows decoding one type descriptor with
+// default decoding.
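+//
+// A sketch of the intended use from a target-specific decoder (the helper
+// name is illustrative, not actual API):
+// if (!isTargetSpecificDescriptor(*Str))
+// Type = Context.DecodeTypeStr(Str, Context, Error, RequireICE,
+// /*AllowTypeModifiers=*/true);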
+QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
+ GetBuiltinTypeError &Error, bool &RequireICE,
+ bool AllowTypeModifiers) const {
+ return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers);
+}
+
/// GetBuiltinType - Return the type for the specified builtin.
QualType ASTContext::GetBuiltinType(unsigned Id,
GetBuiltinTypeError &Error,
@@ -10273,12 +10548,17 @@ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
} else if (D->hasAttr<DLLExportAttr>()) {
if (L == GVA_DiscardableODR)
return GVA_StrongODR;
- } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice &&
- D->hasAttr<CUDAGlobalAttr>()) {
+ } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
// Device-side functions with __global__ attribute must always be
// visible externally so they can be launched from host.
- if (L == GVA_DiscardableODR || L == GVA_Internal)
+ if (D->hasAttr<CUDAGlobalAttr>() &&
+ (L == GVA_DiscardableODR || L == GVA_Internal))
return GVA_StrongODR;
+ // Single source offloading languages like CUDA/HIP need to be able to
+ // access static device variables from host code of the same compilation
+ // unit. This is done by externalizing the static variable.
+ if (Context.shouldExternalizeStaticVar(D))
+ return GVA_StrongExternal;
}
return L;
}
@@ -10428,37 +10708,6 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
else
return false;
- if (D->isFromASTFile() && !LangOpts.BuildingPCHWithObjectFile) {
- assert(getExternalSource() && "It's from an AST file; must have a source.");
- // On Windows, PCH files are built together with an object file. If this
- // declaration comes from such a PCH and DeclMustBeEmitted would return
- // true, it would have returned true and the decl would have been emitted
- // into that object file, so it doesn't need to be emitted here.
- // Note that decls are still emitted if they're referenced, as usual;
- // DeclMustBeEmitted is used to decide whether a decl must be emitted even
- // if it's not referenced.
- //
- // Explicit template instantiation definitions are tricky. If there was an
- // explicit template instantiation decl in the PCH before, it will look like
- // the definition comes from there, even if that was just the declaration.
- // (Explicit instantiation defs of variable templates always get emitted.)
- bool IsExpInstDef =
- isa<FunctionDecl>(D) &&
- cast<FunctionDecl>(D)->getTemplateSpecializationKind() ==
- TSK_ExplicitInstantiationDefinition;
-
- // Implicit member function definitions, such as operator= might not be
- // marked as template specializations, since they're not coming from a
- // template but synthesized directly on the class.
- IsExpInstDef |=
- isa<CXXMethodDecl>(D) &&
- cast<CXXMethodDecl>(D)->getParent()->getTemplateSpecializationKind() ==
- TSK_ExplicitInstantiationDefinition;
-
- if (getExternalSource()->DeclIsFromPCHWithObjectFile(D) && !IsExpInstDef)
- return false;
- }
-
// If this is a member of a class template, we do not need to emit it.
if (D->getDeclContext()->isDependentContext())
return false;
@@ -10623,13 +10872,13 @@ MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
if (!T)
T = Target;
switch (T->getCXXABI().getKind()) {
+ case TargetCXXABI::AppleARM64:
case TargetCXXABI::Fuchsia:
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::GenericARM:
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::iOS:
- case TargetCXXABI::iOS64:
case TargetCXXABI::WebAssembly:
case TargetCXXABI::WatchOS:
case TargetCXXABI::XL:
@@ -10823,6 +11072,27 @@ ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
return New;
}
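+// C++20 class-type non-type template arguments are backed by template
+// parameter objects: given a hypothetical 'template <Fixed F> struct X {};',
+// every use of 'X<Fixed{1, 2}>' shares the one const object created here to
+// represent that value of 'F'.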
+TemplateParamObjectDecl *
+ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
+ assert(T->isRecordType() && "template param object of unexpected type");
+
+ // C++ [temp.param]p8:
+ // [...] a static storage duration object of type 'const T' [...]
+ T.addConst();
+
+ llvm::FoldingSetNodeID ID;
+ TemplateParamObjectDecl::Profile(ID, T, V);
+
+ void *InsertPos;
+ if (TemplateParamObjectDecl *Existing =
+ TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
+ TemplateParamObjectDecls.InsertNode(New, InsertPos);
+ return New;
+}
+
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
const llvm::Triple &T = getTargetInfo().getTriple();
if (!T.isOSDarwin())
@@ -11030,29 +11300,30 @@ unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
}
}
-FixedPointSemantics ASTContext::getFixedPointSemantics(QualType Ty) const {
+llvm::FixedPointSemantics
+ASTContext::getFixedPointSemantics(QualType Ty) const {
assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
"Can only get the fixed point semantics for a "
"fixed point or integer type.");
if (Ty->isIntegerType())
- return FixedPointSemantics::GetIntegerSemantics(getIntWidth(Ty),
- Ty->isSignedIntegerType());
+ return llvm::FixedPointSemantics::GetIntegerSemantics(
+ getIntWidth(Ty), Ty->isSignedIntegerType());
bool isSigned = Ty->isSignedFixedPointType();
- return FixedPointSemantics(
+ return llvm::FixedPointSemantics(
static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
Ty->isSaturatedFixedPointType(),
!isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}
-APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
+llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
assert(Ty->isFixedPointType());
- return APFixedPoint::getMax(getFixedPointSemantics(Ty));
+ return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}
-APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
+llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
assert(Ty->isFixedPointType());
- return APFixedPoint::getMin(getFixedPointSemantics(Ty));
+ return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}
QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
@@ -11147,8 +11418,7 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else {
- Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
- Target->getTargetOpts().Features);
+ FeatureMap = Target->getTargetOpts().FeatureMap;
}
}
@@ -11157,10 +11427,25 @@ OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
return *OMPTraitInfoVector.back();
}
-const DiagnosticBuilder &
-clang::operator<<(const DiagnosticBuilder &DB,
- const ASTContext::SectionInfo &Section) {
+const StreamingDiagnostic &clang::
+operator<<(const StreamingDiagnostic &DB,
+ const ASTContext::SectionInfo &Section) {
if (Section.Decl)
return DB << Section.Decl;
return DB << "a prior #pragma section";
}
+
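+// A file-scope 'static' variable explicitly marked __device__ or __constant__
+// may be externalized when relocatable device code is disabled; it actually
+// is externalized only if host code in the same TU references it, e.g.:
+// static __device__ int Counter; // becomes external if the host uses it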
+bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
+ return !getLangOpts().GPURelocatableDeviceCode &&
+ ((D->hasAttr<CUDADeviceAttr>() &&
+ !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
+ (D->hasAttr<CUDAConstantAttr>() &&
+ !D->getAttr<CUDAConstantAttr>()->isImplicit())) &&
+ isa<VarDecl>(D) && cast<VarDecl>(D)->isFileVarDecl() &&
+ cast<VarDecl>(D)->getStorageClass() == SC_Static;
+}
+
+bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
+ return mayExternalizeStaticVar(D) &&
+ CUDAStaticDeviceVarReferencedByHost.count(cast<VarDecl>(D));
+}
diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp
index 05adf226bae3..2bc731717b98 100644
--- a/clang/lib/AST/ASTDiagnostic.cpp
+++ b/clang/lib/AST/ASTDiagnostic.cpp
@@ -1560,11 +1560,11 @@ class TemplateDiff {
if (!Tree.HasChildren()) {
// If we're dealing with a template specialization with zero
// arguments, there are no children; special-case this.
- OS << FromTD->getNameAsString() << "<>";
+ OS << FromTD->getDeclName() << "<>";
return;
}
- OS << FromTD->getNameAsString() << '<';
+ OS << FromTD->getDeclName() << '<';
Tree.MoveToChild();
unsigned NumElideArgs = 0;
bool AllArgsElided = true;
@@ -1724,7 +1724,7 @@ class TemplateDiff {
}
if (Same) {
- OS << "template " << FromTD->getNameAsString();
+ OS << "template " << FromTD->getDeclName();
} else if (!PrintTree) {
OS << (FromDefault ? "(default) template " : "template ");
Bold();
@@ -1834,7 +1834,14 @@ class TemplateDiff {
if (VD) {
if (AddressOf)
OS << "&";
- OS << VD->getName();
+ else if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(VD)) {
+ // FIXME: Diffing the APValue would be neat.
+ // FIXME: Suppress this and use the full name of the declaration if the
+ // parameter is a pointer or reference.
+ TPO->printAsInit(OS);
+ return;
+ }
+ VD->printName(OS);
return;
}
diff --git a/clang/lib/AST/ASTDumper.cpp b/clang/lib/AST/ASTDumper.cpp
index 284e5bdbc6b0..3d368a0a7b63 100644
--- a/clang/lib/AST/ASTDumper.cpp
+++ b/clang/lib/AST/ASTDumper.cpp
@@ -129,9 +129,11 @@ void ASTDumper::dumpTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst) {
Visit(D->getTemplatedDecl());
- for (const auto *Child : D->specializations())
- dumpTemplateDeclSpecialization(Child, DumpExplicitInst,
- !D->isCanonicalDecl());
+ if (GetTraversalKind() == TK_AsIs) {
+ for (const auto *Child : D->specializations())
+ dumpTemplateDeclSpecialization(Child, DumpExplicitInst,
+ !D->isCanonicalDecl());
+ }
}
void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 3779e0cb872b..085c50c0667b 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -202,6 +202,9 @@ namespace clang {
return *MaybeVal;
}
+ ExplicitSpecifier importExplicitSpecifier(Error &Err,
+ ExplicitSpecifier ESpec);
+
// Wrapper for an overload set.
template <typename ToDeclT> struct CallOverloadedCreateFun {
template <typename... Args> decltype(auto) operator()(Args &&... args) {
@@ -261,16 +264,6 @@ namespace clang {
void InitializeImportedDecl(Decl *FromD, Decl *ToD) {
ToD->IdentifierNamespace = FromD->IdentifierNamespace;
- if (FromD->hasAttrs())
- for (const Attr *FromAttr : FromD->getAttrs()) {
- // FIXME: Return of the error here is not possible until store of
- // import errors is implemented.
- auto ToAttrOrErr = import(FromAttr);
- if (ToAttrOrErr)
- ToD->addAttr(*ToAttrOrErr);
- else
- llvm::consumeError(ToAttrOrErr.takeError());
- }
if (FromD->isUsed())
ToD->setIsUsed();
if (FromD->isImplicit())
@@ -385,6 +378,8 @@ namespace clang {
ExpectedType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
// Importing declarations
+ Error ImportDeclParts(NamedDecl *D, DeclarationName &Name, NamedDecl *&ToD,
+ SourceLocation &Loc);
Error ImportDeclParts(
NamedDecl *D, DeclContext *&DC, DeclContext *&LexicalDC,
DeclarationName &Name, NamedDecl *&ToD, SourceLocation &Loc);
@@ -397,6 +392,7 @@ namespace clang {
Error ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
Expected<CXXCastPath> ImportCastPath(CastExpr *E);
+ Expected<APValue> ImportAPValue(const APValue &FromValue);
using Designator = DesignatedInitExpr::Designator;
@@ -498,6 +494,7 @@ namespace clang {
ExpectedDecl VisitCXXConstructorDecl(CXXConstructorDecl *D);
ExpectedDecl VisitCXXDestructorDecl(CXXDestructorDecl *D);
ExpectedDecl VisitCXXConversionDecl(CXXConversionDecl *D);
+ ExpectedDecl VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D);
ExpectedDecl VisitFieldDecl(FieldDecl *D);
ExpectedDecl VisitIndirectFieldDecl(IndirectFieldDecl *D);
ExpectedDecl VisitFriendDecl(FriendDecl *D);
@@ -580,6 +577,7 @@ namespace clang {
ExpectedStmt VisitVAArgExpr(VAArgExpr *E);
ExpectedStmt VisitChooseExpr(ChooseExpr *E);
ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E);
+ ExpectedStmt VisitGenericSelectionExpr(GenericSelectionExpr *E);
ExpectedStmt VisitPredefinedExpr(PredefinedExpr *E);
ExpectedStmt VisitDeclRefExpr(DeclRefExpr *E);
ExpectedStmt VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
@@ -646,6 +644,7 @@ namespace clang {
ExpectedStmt VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E);
ExpectedStmt VisitTypeTraitExpr(TypeTraitExpr *E);
ExpectedStmt VisitCXXTypeidExpr(CXXTypeidExpr *E);
+ ExpectedStmt VisitCXXFoldExpr(CXXFoldExpr *E);
template<typename IIter, typename OIter>
Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
@@ -882,11 +881,9 @@ ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) {
import(FromInfo.getTemplateEllipsisLoc());
if (!ToTemplateEllipsisLocOrErr)
return ToTemplateEllipsisLocOrErr.takeError();
-
ToInfo = TemplateArgumentLocInfo(
- *ToTemplateQualifierLocOrErr,
- *ToTemplateNameLocOrErr,
- *ToTemplateEllipsisLocOrErr);
+ Importer.getToContext(), *ToTemplateQualifierLocOrErr,
+ *ToTemplateNameLocOrErr, *ToTemplateEllipsisLocOrErr);
}
return TemplateArgumentLoc(Arg, ToInfo);
@@ -1036,6 +1033,10 @@ ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
case BuiltinType::Id: \
return Importer.getToContext().SingletonId;
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id: \
+ return Importer.getToContext().Id##Ty;
+#include "clang/Basic/PPCTypes.def"
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
@@ -1498,7 +1499,8 @@ ASTNodeImporter::VisitPackExpansionType(const PackExpansionType *T) {
return ToPatternOrErr.takeError();
return Importer.getToContext().getPackExpansionType(*ToPatternOrErr,
- T->getNumExpansions());
+ T->getNumExpansions(),
+ /*ExpectPackInType=*/false);
}
ExpectedType ASTNodeImporter::VisitDependentTemplateSpecializationType(
@@ -1639,6 +1641,25 @@ Error ASTNodeImporter::ImportDeclParts(
return Error::success();
}
+Error ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclarationName &Name,
+ NamedDecl *&ToD, SourceLocation &Loc) {
+
+ // Import the name of this declaration.
+ if (Error Err = importInto(Name, D->getDeclName()))
+ return Err;
+
+ // Import the location of this declaration.
+ if (Error Err = importInto(Loc, D->getLocation()))
+ return Err;
+
+ ToD = cast_or_null<NamedDecl>(Importer.GetAlreadyImportedOrNull(D));
+ if (ToD)
+ if (Error Err = ASTNodeImporter(*this).ImportDefinitionIfNeeded(D, ToD))
+ return Err;
+
+ return Error::success();
+}
+
Error ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
if (!FromD)
return Error::success();
@@ -1743,12 +1764,28 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
Decl *ImportedDecl = *ImportedOrErr;
FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
if (FieldFrom && FieldTo) {
- const RecordType *RecordFrom = FieldFrom->getType()->getAs<RecordType>();
- const RecordType *RecordTo = FieldTo->getType()->getAs<RecordType>();
- if (RecordFrom && RecordTo) {
- RecordDecl *FromRecordDecl = RecordFrom->getDecl();
- RecordDecl *ToRecordDecl = RecordTo->getDecl();
+ RecordDecl *FromRecordDecl = nullptr;
+ RecordDecl *ToRecordDecl = nullptr;
+ // If we have a field that is an ArrayType, we need to check whether the
+ // array element type is a RecordDecl and, if so, import its definition.
+ if (FieldFrom->getType()->isArrayType()) {
+ // getBaseElementTypeUnsafe(...) handles multi-dimensional arrays for us.
+ FromRecordDecl = FieldFrom->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl();
+ ToRecordDecl = FieldTo->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl();
+ }
+
+ if (!FromRecordDecl || !ToRecordDecl) {
+ const RecordType *RecordFrom =
+ FieldFrom->getType()->getAs<RecordType>();
+ const RecordType *RecordTo = FieldTo->getType()->getAs<RecordType>();
+
+ if (RecordFrom && RecordTo) {
+ FromRecordDecl = RecordFrom->getDecl();
+ ToRecordDecl = RecordTo->getDecl();
+ }
+ }
+ if (FromRecordDecl && ToRecordDecl) {
if (FromRecordDecl->isCompleteDefinition() &&
!ToRecordDecl->isCompleteDefinition()) {
Error Err = ImportDefinition(FromRecordDecl, ToRecordDecl);
@@ -1900,7 +1937,8 @@ Error ASTNodeImporter::ImportDefinition(
else
return ToCaptureOrErr.takeError();
}
- cast<CXXRecordDecl>(To)->setCaptures(ToCaptures);
+ cast<CXXRecordDecl>(To)->setCaptures(Importer.getToContext(),
+ ToCaptures);
}
Error Result = ImportDeclContext(From, /*ForceImport=*/true);
@@ -1998,10 +2036,11 @@ Error ASTNodeImporter::ImportInitializer(VarDecl *From, VarDecl *To) {
return ToInitOrErr.takeError();
To->setInit(*ToInitOrErr);
- if (From->isInitKnownICE()) {
- EvaluatedStmt *Eval = To->ensureEvaluatedStmt();
- Eval->CheckedICE = true;
- Eval->IsICE = From->isInitICE();
+ if (EvaluatedStmt *FromEval = From->getEvaluatedStmt()) {
+ EvaluatedStmt *ToEval = To->ensureEvaluatedStmt();
+ ToEval->HasConstantInitialization = FromEval->HasConstantInitialization;
+ ToEval->HasConstantDestruction = FromEval->HasConstantDestruction;
+ // FIXME: Also import the initializer value.
}
// FIXME: Other bits to merge?
@@ -2389,22 +2428,29 @@ ExpectedDecl ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
ExpectedDecl
ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
// Import the major distinguishing characteristics of this typedef.
- DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ // Do not import the DeclContext yet; we will import it once the
+ // TypedefNameDecl has been created.
+ if (Error Err = ImportDeclParts(D, Name, ToD, Loc))
return std::move(Err);
if (ToD)
return ToD;
+ DeclContext *DC = cast_or_null<DeclContext>(
+ Importer.GetAlreadyImportedOrNull(cast<Decl>(D->getDeclContext())));
+ DeclContext *LexicalDC =
+ cast_or_null<DeclContext>(Importer.GetAlreadyImportedOrNull(
+ cast<Decl>(D->getLexicalDeclContext())));
+
// If this typedef is not in block scope, determine whether we've
// seen a typedef with the same name (that we can merge with) or any
// other entity by that name (which name lookup could conflict with).
// Note: Repeated typedefs are not valid in C99:
// 'typedef int T; typedef int T;' is invalid
// We do not care about this now.
- if (!DC->isFunctionOrMethod()) {
+ if (DC && !DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
@@ -2461,8 +2507,15 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
Name.getAsIdentifierInfo(), ToTypeSourceInfo))
return ToTypedef;
- ToTypedef->setAccess(D->getAccess());
+ // Import the DeclContext and set it to the Typedef.
+ if ((Err = ImportDeclContext(D, DC, LexicalDC)))
+ return std::move(Err);
+ ToTypedef->setDeclContext(DC);
ToTypedef->setLexicalDeclContext(LexicalDC);
+ // Add it to the lookup table, because we could not do that in MapImported.
+ Importer.AddToLookupTable(ToTypedef);
+
+ ToTypedef->setAccess(D->getAccess());
// Templated declarations should not appear in DeclContext.
TypeAliasDecl *FromAlias = IsAlias ? cast<TypeAliasDecl>(D) : nullptr;
@@ -3130,6 +3183,14 @@ bool ASTNodeImporter::hasAutoReturnTypeDeclaredInside(FunctionDecl *D) {
return false;
}
+ExplicitSpecifier
+ASTNodeImporter::importExplicitSpecifier(Error &Err, ExplicitSpecifier ESpec) {
+ Expr *ExplicitExpr = ESpec.getExpr();
+ if (ExplicitExpr)
+ ExplicitExpr = importChecked(Err, ESpec.getExpr());
+ return ExplicitSpecifier(ExplicitExpr, ESpec.getKind());
+}
+
ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
SmallVector<Decl *, 2> Redecls = getCanonicalForwardRedeclChain(D);
@@ -3309,20 +3370,14 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Create the imported function.
FunctionDecl *ToFunction = nullptr;
if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
- Expr *ExplicitExpr = nullptr;
- if (FromConstructor->getExplicitSpecifier().getExpr()) {
- auto Imp = import(FromConstructor->getExplicitSpecifier().getExpr());
- if (!Imp)
- return Imp.takeError();
- ExplicitExpr = *Imp;
- }
+ ExplicitSpecifier ESpec =
+ importExplicitSpecifier(Err, FromConstructor->getExplicitSpecifier());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl<CXXConstructorDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- ToInnerLocStart, NameInfo, T, TInfo,
- ExplicitSpecifier(
- ExplicitExpr,
- FromConstructor->getExplicitSpecifier().getKind()),
- D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind(),
+ ToInnerLocStart, NameInfo, T, TInfo, ESpec, D->isInlineSpecified(),
+ D->isImplicit(), D->getConstexprKind(),
InheritedConstructor(), // FIXME: Properly import inherited
// constructor info
TrailingRequiresClause))
@@ -3347,18 +3402,13 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToDtor->setOperatorDelete(ToOperatorDelete, ToThisArg);
} else if (CXXConversionDecl *FromConversion =
dyn_cast<CXXConversionDecl>(D)) {
- Expr *ExplicitExpr = nullptr;
- if (FromConversion->getExplicitSpecifier().getExpr()) {
- auto Imp = import(FromConversion->getExplicitSpecifier().getExpr());
- if (!Imp)
- return Imp.takeError();
- ExplicitExpr = *Imp;
- }
+ ExplicitSpecifier ESpec =
+ importExplicitSpecifier(Err, FromConversion->getExplicitSpecifier());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl<CXXConversionDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
- ExplicitSpecifier(ExplicitExpr,
- FromConversion->getExplicitSpecifier().getKind()),
+ ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(), ESpec,
D->getConstexprKind(), SourceLocation(), TrailingRequiresClause))
return ToFunction;
} else if (auto *Method = dyn_cast<CXXMethodDecl>(D)) {
@@ -3368,6 +3418,17 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
Method->isInlineSpecified(), D->getConstexprKind(),
SourceLocation(), TrailingRequiresClause))
return ToFunction;
+ } else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(D)) {
+ ExplicitSpecifier ESpec =
+ importExplicitSpecifier(Err, Guide->getExplicitSpecifier());
+ if (Err)
+ return std::move(Err);
+ if (GetImportedOrCreateDecl<CXXDeductionGuideDecl>(
+ ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart, ESpec,
+ NameInfo, T, TInfo, ToEndLoc))
+ return ToFunction;
+ cast<CXXDeductionGuideDecl>(ToFunction)
+ ->setIsCopyDeductionCandidate(Guide->isCopyDeductionCandidate());
} else {
if (GetImportedOrCreateDecl(
ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart,
@@ -3495,6 +3556,11 @@ ExpectedDecl ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
return VisitCXXMethodDecl(D);
}
+ExpectedDecl
+ASTNodeImporter::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
+ return VisitFunctionDecl(D);
+}
+
ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
@@ -4758,11 +4824,10 @@ Error ASTNodeImporter::ImportDefinition(
return ToImplOrErr.takeError();
}
- if (shouldForceImportDeclContext(Kind)) {
- // Import all of the members of this class.
- if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
- return Err;
- }
+ // Import all of the members of this class.
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
+
return Error::success();
}
@@ -5137,8 +5202,6 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
// context. This context will be fixed when the actual template declaration
// is created.
- // FIXME: Import default argument and constraint expression.
-
ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
if (!BeginLocOrErr)
return BeginLocOrErr.takeError();
@@ -5185,6 +5248,14 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
ToIDC);
}
+ if (D->hasDefaultArgument()) {
+ Expected<TypeSourceInfo *> ToDefaultArgOrErr =
+ import(D->getDefaultArgumentInfo());
+ if (!ToDefaultArgOrErr)
+ return ToDefaultArgOrErr.takeError();
+ ToD->setDefaultArgument(*ToDefaultArgOrErr);
+ }
+
return ToD;
}
@@ -5200,15 +5271,22 @@ ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
if (Err)
return std::move(Err);
- // FIXME: Import default argument.
-
NonTypeTemplateParmDecl *ToD = nullptr;
- (void)GetImportedOrCreateDecl(
- ToD, D, Importer.getToContext(),
- Importer.getToContext().getTranslationUnitDecl(),
- ToInnerLocStart, ToLocation, D->getDepth(),
- D->getPosition(), ToDeclName.getAsIdentifierInfo(), ToType,
- D->isParameterPack(), ToTypeSourceInfo);
+ if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ ToInnerLocStart, ToLocation, D->getDepth(),
+ D->getPosition(),
+ ToDeclName.getAsIdentifierInfo(), ToType,
+ D->isParameterPack(), ToTypeSourceInfo))
+ return ToD;
+
+ if (D->hasDefaultArgument()) {
+ ExpectedExpr ToDefaultArgOrErr = import(D->getDefaultArgument());
+ if (!ToDefaultArgOrErr)
+ return ToDefaultArgOrErr.takeError();
+ ToD->setDefaultArgument(*ToDefaultArgOrErr);
+ }
+
return ToD;
}
@@ -5229,15 +5307,22 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
if (!TemplateParamsOrErr)
return TemplateParamsOrErr.takeError();
- // FIXME: Import default argument.
-
TemplateTemplateParmDecl *ToD = nullptr;
- (void)GetImportedOrCreateDecl(
- ToD, D, Importer.getToContext(),
- Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr,
- D->getDepth(), D->getPosition(), D->isParameterPack(),
- (*NameOrErr).getAsIdentifierInfo(),
- *TemplateParamsOrErr);
+ if (GetImportedOrCreateDecl(
+ ToD, D, Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr,
+ D->getDepth(), D->getPosition(), D->isParameterPack(),
+ (*NameOrErr).getAsIdentifierInfo(), *TemplateParamsOrErr))
+ return ToD;
+
+ if (D->hasDefaultArgument()) {
+ Expected<TemplateArgumentLoc> ToDefaultArgOrErr =
+ import(D->getDefaultArgument());
+ if (!ToDefaultArgOrErr)
+ return ToDefaultArgOrErr.takeError();
+ ToD->setDefaultArgument(Importer.getToContext(), *ToDefaultArgOrErr);
+ }
+
return ToD;
}
@@ -5309,16 +5394,16 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
CXXRecordDecl *FromTemplated = D->getTemplatedDecl();
+ auto TemplateParamsOrErr = import(D->getTemplateParameters());
+ if (!TemplateParamsOrErr)
+ return TemplateParamsOrErr.takeError();
+
// Create the declaration that is being templated.
CXXRecordDecl *ToTemplated;
if (Error Err = importInto(ToTemplated, FromTemplated))
return std::move(Err);
// Create the class template declaration itself.
- auto TemplateParamsOrErr = import(D->getTemplateParameters());
- if (!TemplateParamsOrErr)
- return TemplateParamsOrErr.takeError();
-
ClassTemplateDecl *D2;
if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, Loc, Name,
*TemplateParamsOrErr, ToTemplated))
@@ -5402,8 +5487,9 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
if (PrevDecl) {
if (IsStructuralMatch(D, PrevDecl)) {
- if (D->isThisDeclarationADefinition() && PrevDecl->getDefinition()) {
- Importer.MapImported(D, PrevDecl->getDefinition());
+ CXXRecordDecl *PrevDefinition = PrevDecl->getDefinition();
+ if (D->isThisDeclarationADefinition() && PrevDefinition) {
+ Importer.MapImported(D, PrevDefinition);
// Import those default field initializers which have been
// instantiated in the "From" context, but not in the "To" context.
for (auto *FromField : D->fields()) {
@@ -5425,7 +5511,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
//
// Generally, ASTCommon.h/DeclUpdateKind enum gives a very good hint
// what else could be fused during an AST merge.
- return PrevDecl;
+ return PrevDefinition;
}
} else { // ODR violation.
// FIXME HandleNameConflict
@@ -6066,6 +6152,8 @@ ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
auto ToInit = importChecked(Err, S->getInit());
auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
auto ToCond = importChecked(Err, S->getCond());
+ auto ToLParenLoc = importChecked(Err, S->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
auto ToThen = importChecked(Err, S->getThen());
auto ToElseLoc = importChecked(Err, S->getElseLoc());
auto ToElse = importChecked(Err, S->getElse());
@@ -6073,8 +6161,8 @@ ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
return std::move(Err);
return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(),
- ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc,
- ToElse);
+ ToInit, ToConditionVariable, ToCond, ToLParenLoc,
+ ToRParenLoc, ToThen, ToElseLoc, ToElse);
}
ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
@@ -6083,13 +6171,16 @@ ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
auto ToInit = importChecked(Err, S->getInit());
auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
auto ToCond = importChecked(Err, S->getCond());
+ auto ToLParenLoc = importChecked(Err, S->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
auto ToBody = importChecked(Err, S->getBody());
auto ToSwitchLoc = importChecked(Err, S->getSwitchLoc());
if (Err)
return std::move(Err);
- auto *ToStmt = SwitchStmt::Create(Importer.getToContext(), ToInit,
- ToConditionVariable, ToCond);
+ auto *ToStmt =
+ SwitchStmt::Create(Importer.getToContext(), ToInit, ToConditionVariable,
+ ToCond, ToLParenLoc, ToRParenLoc);
ToStmt->setBody(ToBody);
ToStmt->setSwitchLoc(ToSwitchLoc);
@@ -6437,6 +6528,40 @@ ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
return new (Importer.getToContext()) GNUNullExpr(*TypeOrErr, *BeginLocOrErr);
}
+ExpectedStmt
+ASTNodeImporter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
+ Error Err = Error::success();
+ auto ToGenericLoc = importChecked(Err, E->getGenericLoc());
+ auto *ToControllingExpr = importChecked(Err, E->getControllingExpr());
+ auto ToDefaultLoc = importChecked(Err, E->getDefaultLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
+
+ ArrayRef<const TypeSourceInfo *> FromAssocTypes(E->getAssocTypeSourceInfos());
+ SmallVector<TypeSourceInfo *, 1> ToAssocTypes(FromAssocTypes.size());
+ if (Error Err = ImportContainerChecked(FromAssocTypes, ToAssocTypes))
+ return std::move(Err);
+
+ ArrayRef<const Expr *> FromAssocExprs(E->getAssocExprs());
+ SmallVector<Expr *, 1> ToAssocExprs(FromAssocExprs.size());
+ if (Error Err = ImportContainerChecked(FromAssocExprs, ToAssocExprs))
+ return std::move(Err);
+
+ const ASTContext &ToCtx = Importer.getToContext();
+ if (E->isResultDependent()) {
+ return GenericSelectionExpr::Create(
+ ToCtx, ToGenericLoc, ToControllingExpr,
+ llvm::makeArrayRef(ToAssocTypes), llvm::makeArrayRef(ToAssocExprs),
+ ToDefaultLoc, ToRParenLoc, E->containsUnexpandedParameterPack());
+ }
+
+ return GenericSelectionExpr::Create(
+ ToCtx, ToGenericLoc, ToControllingExpr, llvm::makeArrayRef(ToAssocTypes),
+ llvm::makeArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
+ E->containsUnexpandedParameterPack(), E->getResultIndex());
+}
+
ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
Error Err = Error::success();
@@ -6671,18 +6796,11 @@ ExpectedStmt ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
Error Err = Error::success();
auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToResult = importChecked(Err, E->getAPValueResult());
if (Err)
return std::move(Err);
- // TODO : Handle APValue::ValueKind that require importing.
-
- APValue::ValueKind Kind = E->getResultAPValueKind();
- if (Kind == APValue::Int || Kind == APValue::Float ||
- Kind == APValue::FixedPoint || Kind == APValue::ComplexFloat ||
- Kind == APValue::ComplexInt)
- return ConstantExpr::Create(Importer.getToContext(), ToSubExpr,
- E->getAPValueResult());
- return ConstantExpr::Create(Importer.getToContext(), ToSubExpr);
+ return ConstantExpr::Create(Importer.getToContext(), ToSubExpr, ToResult);
}
ExpectedStmt ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
Error Err = Error::success();
@@ -6924,7 +7042,7 @@ ExpectedStmt ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
return ImplicitCastExpr::Create(
Importer.getToContext(), *ToTypeOrErr, E->getCastKind(), *ToSubExprOrErr,
- &(*ToBasePathOrErr), E->getValueKind());
+ &(*ToBasePathOrErr), E->getValueKind(), E->getFPFeatures());
}
ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
@@ -6951,8 +7069,8 @@ ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
return ToRParenLocOrErr.takeError();
return CStyleCastExpr::Create(
Importer.getToContext(), ToType, E->getValueKind(), E->getCastKind(),
- ToSubExpr, ToBasePath, ToTypeInfoAsWritten, *ToLParenLocOrErr,
- *ToRParenLocOrErr);
+ ToSubExpr, ToBasePath, CCE->getFPFeatures(), ToTypeInfoAsWritten,
+ *ToLParenLocOrErr, *ToRParenLocOrErr);
}
case Stmt::CXXFunctionalCastExprClass: {
@@ -6965,8 +7083,8 @@ ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
return ToRParenLocOrErr.takeError();
return CXXFunctionalCastExpr::Create(
Importer.getToContext(), ToType, E->getValueKind(), ToTypeInfoAsWritten,
- E->getCastKind(), ToSubExpr, ToBasePath, *ToLParenLocOrErr,
- *ToRParenLocOrErr);
+ E->getCastKind(), ToSubExpr, ToBasePath, FCE->getFPFeatures(),
+ *ToLParenLocOrErr, *ToRParenLocOrErr);
}
case Stmt::ObjCBridgedCastExprClass: {
@@ -7338,7 +7456,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
return std::move(Err);
return CXXMemberCallExpr::Create(Importer.getToContext(), ToCallee, ToArgs,
- ToType, E->getValueKind(), ToRParenLoc);
+ ToType, E->getValueKind(), ToRParenLoc,
+ E->getFPFeatures());
}
ExpectedStmt ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) {
@@ -7514,17 +7633,18 @@ ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
Error Err = Error::success();
auto ToLParenLoc = importChecked(Err, E->getLParenLoc());
auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToType = importChecked(Err, E->getType());
auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
if (Err)
return std::move(Err);
- SmallVector<Expr *, 8> ToArgs(E->arg_size());
+ SmallVector<Expr *, 8> ToArgs(E->getNumArgs());
if (Error Err =
ImportArrayChecked(E->arg_begin(), E->arg_end(), ToArgs.begin()))
return std::move(Err);
return CXXUnresolvedConstructExpr::Create(
- Importer.getToContext(), ToTypeSourceInfo, ToLParenLoc,
+ Importer.getToContext(), ToType, ToTypeSourceInfo, ToLParenLoc,
llvm::makeArrayRef(ToArgs), ToRParenLoc);
}
@@ -7648,8 +7768,8 @@ ExpectedStmt ASTNodeImporter::VisitCallExpr(CallExpr *E) {
}
return CallExpr::Create(Importer.getToContext(), ToCallee, ToArgs, ToType,
- E->getValueKind(), ToRParenLoc, /*MinNumArgs=*/0,
- E->getADLCallKind());
+ E->getValueKind(), ToRParenLoc, E->getFPFeatures(),
+ /*MinNumArgs=*/0, E->getADLCallKind());
}
ExpectedStmt ASTNodeImporter::VisitLambdaExpr(LambdaExpr *E) {
@@ -7808,10 +7928,11 @@ ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
if (!ToBasePathOrErr)
return ToBasePathOrErr.takeError();
- if (isa<CXXStaticCastExpr>(E)) {
+ if (auto CCE = dyn_cast<CXXStaticCastExpr>(E)) {
return CXXStaticCastExpr::Create(
Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
- ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ ToTypeInfoAsWritten, CCE->getFPFeatures(), ToOperatorLoc, ToRParenLoc,
+ ToAngleBrackets);
} else if (isa<CXXDynamicCastExpr>(E)) {
return CXXDynamicCastExpr::Create(
Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
@@ -7841,7 +7962,8 @@ ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
return std::move(Err);
return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr(
- ToType, E->getValueKind(), ToExprLoc, ToParameter, ToReplacement);
+ ToType, E->getValueKind(), ToExprLoc, ToParameter,
+ E->isReferenceParameter(), ToReplacement);
}
ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
@@ -7890,6 +8012,25 @@ ExpectedStmt ASTNodeImporter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
*ToTypeOrErr, *ToExprOperandOrErr, *ToSourceRangeOrErr);
}
+ExpectedStmt ASTNodeImporter::VisitCXXFoldExpr(CXXFoldExpr *E) {
+ Error Err = Error::success();
+
+ QualType ToType = importChecked(Err, E->getType());
+ UnresolvedLookupExpr *ToCallee = importChecked(Err, E->getCallee());
+ SourceLocation ToLParenLoc = importChecked(Err, E->getLParenLoc());
+ Expr *ToLHS = importChecked(Err, E->getLHS());
+ SourceLocation ToEllipsisLoc = importChecked(Err, E->getEllipsisLoc());
+ Expr *ToRHS = importChecked(Err, E->getRHS());
+ SourceLocation ToRParenLoc = importChecked(Err, E->getRParenLoc());
+
+ if (Err)
+ return std::move(Err);
+
+ return new (Importer.getToContext())
+ CXXFoldExpr(ToType, ToCallee, ToLParenLoc, ToLHS, E->getOperator(),
+ ToEllipsisLoc, ToRHS, ToRParenLoc, E->getNumExpansions());
+}
+
Error ASTNodeImporter::ImportOverriddenMethods(CXXMethodDecl *ToMethod,
CXXMethodDecl *FromMethod) {
Error ImportErrors = Error::success();
@@ -8077,6 +8218,16 @@ Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
ToAttr = To;
break;
}
+ case attr::Format: {
+ const auto *From = cast<FormatAttr>(FromAttr);
+ FormatAttr *To;
+ IdentifierInfo *ToAttrType = Import(From->getType());
+ To = FormatAttr::Create(ToContext, ToAttrType, From->getFormatIdx(),
+ From->getFirstArg(), ToRange, From->getSyntax());
+ To->setInherited(From->isInherited());
+ ToAttr = To;
+ break;
+ }
default:
// FIXME: 'clone' copies every member but some of them should be imported.
// Handle other Attrs that have parameters that should be imported.
@@ -8127,7 +8278,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
return make_error<ImportError>(*Error);
}
- // If FromD has some updated flags after last import, apply it
+ // If FromD has some updated flags after the last import, apply them.
updateFlags(FromD, ToD);
// If we encounter a cycle during an import then we save the relevant part
// of the import path associated to the Decl.
@@ -8222,6 +8373,15 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Make sure that ImportImpl registered the imported decl.
assert(ImportedDecls.count(FromD) != 0 && "Missing call to MapImported?");
+ if (FromD->hasAttrs())
+ for (const Attr *FromAttr : FromD->getAttrs()) {
+ auto ToAttrOrErr = Import(FromAttr);
+ if (ToAttrOrErr)
+ ToD->addAttr(*ToAttrOrErr);
+ else
+ return ToAttrOrErr.takeError();
+ }
+
// Notify subclasses.
Imported(FromD, ToD);
@@ -8625,7 +8785,7 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
}
ToID = ToSM.getFileID(MLoc);
} else {
- const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
+ const SrcMgr::ContentCache *Cache = &FromSLoc.getFile().getContentCache();
if (!IsBuiltin && !Cache->BufferOverridden) {
// Include location of this file.
@@ -8648,7 +8808,7 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
// FIXME: We definitely want to re-use the existing MemoryBuffer, rather
// than mmap the files several times.
auto Entry =
- ToFileManager.getFile(Cache->OrigEntry->getName());
+ ToFileManager.getOptionalFileRef(Cache->OrigEntry->getName());
// FIXME: The filename may be a virtual name that does probably not
// point to a valid file and we get no Entry here. In this case try with
// the memory buffer below.
@@ -8660,12 +8820,10 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
if (ToID.isInvalid() || IsBuiltin) {
// FIXME: We want to re-use the existing MemoryBuffer!
- bool Invalid = true;
- const llvm::MemoryBuffer *FromBuf =
- Cache->getBuffer(FromContext.getDiagnostics(),
- FromSM.getFileManager(), SourceLocation{}, &Invalid);
- if (!FromBuf || Invalid)
- // FIXME: Use a new error kind?
+ llvm::Optional<llvm::MemoryBufferRef> FromBuf =
+ Cache->getBufferOrNone(FromContext.getDiagnostics(),
+ FromSM.getFileManager(), SourceLocation{});
+ if (!FromBuf)
return llvm::make_error<ImportError>(ImportError::Unknown);
std::unique_ptr<llvm::MemoryBuffer> ToBuf =
@@ -8772,6 +8930,11 @@ ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
return Imported;
}
+llvm::Expected<APValue> ASTImporter::Import(const APValue &FromValue) {
+ ASTNodeImporter Importer(*this);
+ return Importer.ImportAPValue(FromValue);
+}
+
Error ASTImporter::ImportDefinition(Decl *From) {
ExpectedDecl ToOrErr = Import(From);
if (!ToOrErr)
@@ -8902,6 +9065,169 @@ Expected<Selector> ASTImporter::Import(Selector FromSel) {
return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data());
}
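+// Deep-import an APValue: scalar kinds are copied verbatim, aggregate kinds
+// recurse into their elements, and any declarations or expressions the value
+// refers to (union fields, member pointers, lvalue bases and paths) are
+// imported into the destination context first.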
+llvm::Expected<APValue>
+ASTNodeImporter::ImportAPValue(const APValue &FromValue) {
+ APValue Result;
+ llvm::Error Err = llvm::Error::success();
+ auto ImportLoop = [&](const APValue *From, APValue *To, unsigned Size) {
+ for (unsigned Idx = 0; Idx < Size; Idx++) {
+ APValue Tmp = importChecked(Err, From[Idx]);
+ To[Idx] = Tmp;
+ }
+ };
+ switch (FromValue.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ case APValue::Int:
+ case APValue::Float:
+ case APValue::FixedPoint:
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ Result = FromValue;
+ break;
+ case APValue::Vector: {
+ Result.MakeVector();
+ MutableArrayRef<APValue> Elts =
+ Result.setVectorUninit(FromValue.getVectorLength());
+ ImportLoop(((const APValue::Vec *)(const char *)&FromValue.Data)->Elts,
+ Elts.data(), FromValue.getVectorLength());
+ break;
+ }
+ case APValue::Array:
+ Result.MakeArray(FromValue.getArrayInitializedElts(),
+ FromValue.getArraySize());
+ ImportLoop(((const APValue::Arr *)(const char *)&FromValue.Data)->Elts,
+ ((const APValue::Arr *)(const char *)&Result.Data)->Elts,
+ FromValue.getArrayInitializedElts());
+ break;
+ case APValue::Struct:
+ Result.MakeStruct(FromValue.getStructNumBases(),
+ FromValue.getStructNumFields());
+ ImportLoop(
+ ((const APValue::StructData *)(const char *)&FromValue.Data)->Elts,
+ ((const APValue::StructData *)(const char *)&Result.Data)->Elts,
+ FromValue.getStructNumBases() + FromValue.getStructNumFields());
+ break;
+ case APValue::Union: {
+ Result.MakeUnion();
+ const Decl *ImpFDecl = importChecked(Err, FromValue.getUnionField());
+ APValue ImpValue = importChecked(Err, FromValue.getUnionValue());
+ if (Err)
+ return std::move(Err);
+ Result.setUnion(cast<FieldDecl>(ImpFDecl), ImpValue);
+ break;
+ }
+ case APValue::AddrLabelDiff: {
+ Result.MakeAddrLabelDiff();
+ const Expr *ImpLHS = importChecked(Err, FromValue.getAddrLabelDiffLHS());
+ const Expr *ImpRHS = importChecked(Err, FromValue.getAddrLabelDiffRHS());
+ if (Err)
+ return std::move(Err);
+ Result.setAddrLabelDiff(cast<AddrLabelExpr>(ImpLHS),
+ cast<AddrLabelExpr>(ImpRHS));
+ break;
+ }
+ case APValue::MemberPointer: {
+ const Decl *ImpMemPtrDecl =
+ importChecked(Err, FromValue.getMemberPointerDecl());
+ if (Err)
+ return std::move(Err);
+ MutableArrayRef<const CXXRecordDecl *> ToPath =
+ Result.setMemberPointerUninit(
+ cast<const ValueDecl>(ImpMemPtrDecl),
+ FromValue.isMemberPointerToDerivedMember(),
+ FromValue.getMemberPointerPath().size());
+ llvm::ArrayRef<const CXXRecordDecl *> FromPath =
+ Result.getMemberPointerPath();
+ for (unsigned Idx = 0; Idx < FromValue.getMemberPointerPath().size();
+ Idx++) {
+ const Decl *ImpDecl = importChecked(Err, FromPath[Idx]);
+ if (Err)
+ return std::move(Err);
+ ToPath[Idx] = cast<const CXXRecordDecl>(ImpDecl->getCanonicalDecl());
+ }
+ break;
+ }
+ case APValue::LValue:
+ APValue::LValueBase Base;
+ QualType FromElemTy;
+ if (FromValue.getLValueBase()) {
+ assert(!FromValue.getLValueBase().is<DynamicAllocLValue>() &&
+ "in C++20 dynamic allocation are transient so they shouldn't "
+ "appear in the AST");
+ if (!FromValue.getLValueBase().is<TypeInfoLValue>()) {
+ if (const auto *E =
+ FromValue.getLValueBase().dyn_cast<const Expr *>()) {
+ FromElemTy = E->getType();
+ const Expr *ImpExpr = importChecked(Err, E);
+ if (Err)
+ return std::move(Err);
+ Base = APValue::LValueBase(ImpExpr,
+ FromValue.getLValueBase().getCallIndex(),
+ FromValue.getLValueBase().getVersion());
+ } else {
+ FromElemTy =
+ FromValue.getLValueBase().get<const ValueDecl *>()->getType();
+ const Decl *ImpDecl = importChecked(
+ Err, FromValue.getLValueBase().get<const ValueDecl *>());
+ if (Err)
+ return std::move(Err);
+ Base = APValue::LValueBase(cast<ValueDecl>(ImpDecl),
+ FromValue.getLValueBase().getCallIndex(),
+ FromValue.getLValueBase().getVersion());
+ }
+ } else {
+ FromElemTy = FromValue.getLValueBase().getTypeInfoType();
+ QualType ImpTypeInfo = importChecked(
+ Err,
+ QualType(FromValue.getLValueBase().get<TypeInfoLValue>().getType(),
+ 0));
+ QualType ImpType =
+ importChecked(Err, FromValue.getLValueBase().getTypeInfoType());
+ if (Err)
+ return std::move(Err);
+ Base = APValue::LValueBase::getTypeInfo(
+ TypeInfoLValue(ImpTypeInfo.getTypePtr()), ImpType);
+ }
+ }
+ CharUnits Offset = FromValue.getLValueOffset();
+ unsigned PathLength = FromValue.getLValuePath().size();
+ Result.MakeLValue();
+ if (FromValue.hasLValuePath()) {
+ MutableArrayRef<APValue::LValuePathEntry> ToPath = Result.setLValueUninit(
+ Base, Offset, PathLength, FromValue.isLValueOnePastTheEnd(),
+ FromValue.isNullPointer());
+ llvm::ArrayRef<APValue::LValuePathEntry> FromPath =
+ FromValue.getLValuePath();
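+      // Import the path entries one by one: record-type steps reference a
+      // base class or member declaration, while array steps are plain indices.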
+ for (unsigned LoopIdx = 0; LoopIdx < PathLength; LoopIdx++) {
+ if (FromElemTy->isRecordType()) {
+ const Decl *FromDecl =
+ FromPath[LoopIdx].getAsBaseOrMember().getPointer();
+ const Decl *ImpDecl = importChecked(Err, FromDecl);
+ if (Err)
+ return std::move(Err);
+ if (auto *RD = dyn_cast<CXXRecordDecl>(FromDecl))
+ FromElemTy = Importer.FromContext.getRecordType(RD);
+ else
+ FromElemTy = cast<ValueDecl>(FromDecl)->getType();
+ ToPath[LoopIdx] = APValue::LValuePathEntry(APValue::BaseOrMemberType(
+ ImpDecl, FromPath[LoopIdx].getAsBaseOrMember().getInt()));
+ } else {
+ FromElemTy =
+ Importer.FromContext.getAsArrayType(FromElemTy)->getElementType();
+ ToPath[LoopIdx] = APValue::LValuePathEntry::ArrayIndex(
+ FromPath[LoopIdx].getAsArrayIndex());
+ }
+ }
+ } else
+ Result.setLValue(Base, Offset, APValue::NoLValuePath{},
+ FromValue.isNullPointer());
+ }
+ if (Err)
+ return std::move(Err);
+ return Result;
+}
+
Expected<DeclarationName> ASTImporter::HandleNameConflict(DeclarationName Name,
DeclContext *DC,
unsigned IDNS,
@@ -8961,7 +9287,11 @@ Decl *ASTImporter::MapImported(Decl *From, Decl *To) {
// This mapping should be maintained only in this function. Therefore do not
// check for additional consistency.
ImportedFromDecls[To] = From;
- AddToLookupTable(To);
+  // In the case of TypedefNameDecl we create the Decl first and only then
+  // import and set its DeclContext, so the DC is still not set when we reach
+  // here from GetImportedOrCreateDecl.
+ if (To->getDeclContext())
+ AddToLookupTable(To);
return To;
}
diff --git a/clang/lib/AST/ASTImporterLookupTable.cpp b/clang/lib/AST/ASTImporterLookupTable.cpp
index 4d6fff8f3419..e17d6082dcdc 100644
--- a/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -22,6 +22,20 @@ namespace {
struct Builder : RecursiveASTVisitor<Builder> {
ASTImporterLookupTable &LT;
Builder(ASTImporterLookupTable &LT) : LT(LT) {}
+
+ bool VisitTypedefNameDecl(TypedefNameDecl *D) {
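+    // Make sure the record referenced by a typedef's underlying type is
+    // present in the lookup table.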
+ QualType Ty = D->getUnderlyingType();
+ Ty = Ty.getCanonicalType();
+ if (const auto *RTy = dyn_cast<RecordType>(Ty)) {
+ LT.add(RTy->getAsRecordDecl());
+      // Also add the record's field decls to the table.
+ for (auto *it : RTy->getAsRecordDecl()->fields()) {
+ LT.add(it);
+ }
+ }
+ return true;
+ }
+
bool VisitNamedDecl(NamedDecl *D) {
LT.add(D);
return true;
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index 8b5b2444f1e2..d004e443ae06 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -66,9 +66,15 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -149,32 +155,253 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
-/// Determine structural equivalence of two expressions.
-static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- const Expr *E1, const Expr *E2) {
- if (!E1 || !E2)
- return E1 == E2;
+namespace {
+/// Encapsulates Stmt comparison logic.
+class StmtComparer {
+ StructuralEquivalenceContext &Context;
+
+  // IsStmtEquivalent overloads. Each overload compares a specific statement
+  // kind and only has to compare the data that is specific to that statement
+  // class. Should only be called from TraverseStmt.
+
+ bool IsStmtEquivalent(const AddrLabelExpr *E1, const AddrLabelExpr *E2) {
+ return IsStructurallyEquivalent(Context, E1->getLabel(), E2->getLabel());
+ }
+
+ bool IsStmtEquivalent(const AtomicExpr *E1, const AtomicExpr *E2) {
+ return E1->getOp() == E2->getOp();
+ }
- if (auto *DE1 = dyn_cast<DependentScopeDeclRefExpr>(E1)) {
- auto *DE2 = dyn_cast<DependentScopeDeclRefExpr>(E2);
- if (!DE2)
+ bool IsStmtEquivalent(const BinaryOperator *E1, const BinaryOperator *E2) {
+ return E1->getOpcode() == E2->getOpcode();
+ }
+
+ bool IsStmtEquivalent(const CallExpr *E1, const CallExpr *E2) {
+ // FIXME: IsStructurallyEquivalent requires non-const Decls.
+ Decl *Callee1 = const_cast<Decl *>(E1->getCalleeDecl());
+ Decl *Callee2 = const_cast<Decl *>(E2->getCalleeDecl());
+
+ // Compare whether both calls know their callee.
+ if (static_cast<bool>(Callee1) != static_cast<bool>(Callee2))
return false;
+
+  // Neither call has a callee, so there is nothing further to compare.
+ if (!static_cast<bool>(Callee1))
+ return true;
+
+ assert(Callee2);
+ return IsStructurallyEquivalent(Context, Callee1, Callee2);
+ }
+
+ bool IsStmtEquivalent(const CharacterLiteral *E1,
+ const CharacterLiteral *E2) {
+ return E1->getValue() == E2->getValue() && E1->getKind() == E2->getKind();
+ }
+
+ bool IsStmtEquivalent(const ChooseExpr *E1, const ChooseExpr *E2) {
+ return true; // Semantics only depend on children.
+ }
+
+ bool IsStmtEquivalent(const CompoundStmt *E1, const CompoundStmt *E2) {
+ // Number of children is actually checked by the generic children comparison
+ // code, but a CompoundStmt is one of the few statements where the number of
+ // children frequently differs and the number of statements is also always
+ // precomputed. Directly comparing the number of children here is thus
+ // just an optimization.
+ return E1->size() == E2->size();
+ }
+
+ bool IsStmtEquivalent(const DependentScopeDeclRefExpr *DE1,
+ const DependentScopeDeclRefExpr *DE2) {
if (!IsStructurallyEquivalent(Context, DE1->getDeclName(),
DE2->getDeclName()))
return false;
return IsStructurallyEquivalent(Context, DE1->getQualifier(),
DE2->getQualifier());
- } else if (auto CastE1 = dyn_cast<ImplicitCastExpr>(E1)) {
- auto *CastE2 = dyn_cast<ImplicitCastExpr>(E2);
- if (!CastE2)
+ }
+
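+  // Common to all expressions: their types must be equivalent.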
+ bool IsStmtEquivalent(const Expr *E1, const Expr *E2) {
+ return IsStructurallyEquivalent(Context, E1->getType(), E2->getType());
+ }
+
+ bool IsStmtEquivalent(const ExpressionTraitExpr *E1,
+ const ExpressionTraitExpr *E2) {
+ return E1->getTrait() == E2->getTrait() && E1->getValue() == E2->getValue();
+ }
+
+ bool IsStmtEquivalent(const FloatingLiteral *E1, const FloatingLiteral *E2) {
+ return E1->isExact() == E2->isExact() && E1->getValue() == E2->getValue();
+ }
+
+ bool IsStmtEquivalent(const GenericSelectionExpr *E1,
+ const GenericSelectionExpr *E2) {
+ for (auto Pair : zip_longest(E1->getAssocTypeSourceInfos(),
+ E2->getAssocTypeSourceInfos())) {
+ Optional<TypeSourceInfo *> Child1 = std::get<0>(Pair);
+ Optional<TypeSourceInfo *> Child2 = std::get<1>(Pair);
+      // A different number of associated types means the expressions differ.
+ if (!Child1 || !Child2)
+ return false;
+
+ if (!IsStructurallyEquivalent(Context, (*Child1)->getType(),
+ (*Child2)->getType()))
+ return false;
+ }
+
+ return true;
+ }
+
+ bool IsStmtEquivalent(const ImplicitCastExpr *CastE1,
+ const ImplicitCastExpr *CastE2) {
+ return IsStructurallyEquivalent(Context, CastE1->getType(),
+ CastE2->getType());
+ }
+
+ bool IsStmtEquivalent(const IntegerLiteral *E1, const IntegerLiteral *E2) {
+ return E1->getValue() == E2->getValue();
+ }
+
+ bool IsStmtEquivalent(const MemberExpr *E1, const MemberExpr *E2) {
+ return IsStructurallyEquivalent(Context, E1->getFoundDecl(),
+ E2->getFoundDecl());
+ }
+
+ bool IsStmtEquivalent(const ObjCStringLiteral *E1,
+ const ObjCStringLiteral *E2) {
+ // Just wraps a StringLiteral child.
+ return true;
+ }
+
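+  // Base case of the overload chain: Stmt itself has no data to compare.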
+ bool IsStmtEquivalent(const Stmt *S1, const Stmt *S2) { return true; }
+
+ bool IsStmtEquivalent(const SourceLocExpr *E1, const SourceLocExpr *E2) {
+ return E1->getIdentKind() == E2->getIdentKind();
+ }
+
+ bool IsStmtEquivalent(const StmtExpr *E1, const StmtExpr *E2) {
+ return E1->getTemplateDepth() == E2->getTemplateDepth();
+ }
+
+ bool IsStmtEquivalent(const StringLiteral *E1, const StringLiteral *E2) {
+ return E1->getBytes() == E2->getBytes();
+ }
+
+ bool IsStmtEquivalent(const SubstNonTypeTemplateParmExpr *E1,
+ const SubstNonTypeTemplateParmExpr *E2) {
+ return IsStructurallyEquivalent(Context, E1->getParameter(),
+ E2->getParameter());
+ }
+
+ bool IsStmtEquivalent(const SubstNonTypeTemplateParmPackExpr *E1,
+ const SubstNonTypeTemplateParmPackExpr *E2) {
+ return IsStructurallyEquivalent(Context, E1->getArgumentPack(),
+ E2->getArgumentPack());
+ }
+
+ bool IsStmtEquivalent(const TypeTraitExpr *E1, const TypeTraitExpr *E2) {
+ if (E1->getTrait() != E2->getTrait())
+ return false;
+
+ for (auto Pair : zip_longest(E1->getArgs(), E2->getArgs())) {
+ Optional<TypeSourceInfo *> Child1 = std::get<0>(Pair);
+ Optional<TypeSourceInfo *> Child2 = std::get<1>(Pair);
+ // Different number of args.
+ if (!Child1 || !Child2)
+ return false;
+
+ if (!IsStructurallyEquivalent(Context, (*Child1)->getType(),
+ (*Child2)->getType()))
+ return false;
+ }
+ return true;
+ }
+
+ bool IsStmtEquivalent(const UnaryExprOrTypeTraitExpr *E1,
+ const UnaryExprOrTypeTraitExpr *E2) {
+ if (E1->getKind() != E2->getKind())
return false;
- if (!IsStructurallyEquivalent(Context, CastE1->getType(),
- CastE2->getType()))
+ return IsStructurallyEquivalent(Context, E1->getTypeOfArgument(),
+ E2->getTypeOfArgument());
+ }
+
+ bool IsStmtEquivalent(const UnaryOperator *E1, const UnaryOperator *E2) {
+ return E1->getOpcode() == E2->getOpcode();
+ }
+
+ bool IsStmtEquivalent(const VAArgExpr *E1, const VAArgExpr *E2) {
+ // Semantics only depend on children.
+ return true;
+ }
+
+ /// End point of the traversal chain.
+ bool TraverseStmt(const Stmt *S1, const Stmt *S2) { return true; }
+
+ // Create traversal methods that traverse the class hierarchy and return
+ // the accumulated result of the comparison. Each TraverseStmt overload
+ // calls the TraverseStmt overload of the parent class. For example,
+ // the TraverseStmt overload for 'BinaryOperator' calls the TraverseStmt
+ // overload of 'Expr' which then calls the overload for 'Stmt'.
+#define STMT(CLASS, PARENT) \
+ bool TraverseStmt(const CLASS *S1, const CLASS *S2) { \
+ if (!TraverseStmt(static_cast<const PARENT *>(S1), \
+ static_cast<const PARENT *>(S2))) \
+ return false; \
+ return IsStmtEquivalent(S1, S2); \
+ }
+#include "clang/AST/StmtNodes.inc"
+
+public:
+ StmtComparer(StructuralEquivalenceContext &C) : Context(C) {}
+
+ /// Determine whether two statements are equivalent. The statements have to
+ /// be of the same kind. The children of the statements and their properties
+ /// are not compared by this function.
+ bool IsEquivalent(const Stmt *S1, const Stmt *S2) {
+ if (S1->getStmtClass() != S2->getStmtClass())
+ return false;
+
+ // Each TraverseStmt walks the class hierarchy from the leaf class to
+ // the root class 'Stmt' (e.g. 'BinaryOperator' -> 'Expr' -> 'Stmt'). Cast
+ // the Stmt we have here to its specific subclass so that we call the
+ // overload that walks the whole class hierarchy from leaf to root (e.g.,
+    // cast to 'BinaryOperator' so that 'Expr' and 'Stmt' are traversed).
+ switch (S1->getStmtClass()) {
+ case Stmt::NoStmtClass:
+ llvm_unreachable("Can't traverse NoStmtClass");
+#define STMT(CLASS, PARENT) \
+ case Stmt::StmtClass::CLASS##Class: \
+ return TraverseStmt(static_cast<const CLASS *>(S1), \
+ static_cast<const CLASS *>(S2));
+#define ABSTRACT_STMT(S)
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("Invalid statement kind");
+ }
+};
+} // namespace
+
+/// Determine structural equivalence of two statements.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const Stmt *S1, const Stmt *S2) {
+ if (!S1 || !S2)
+ return S1 == S2;
+
+  // Compare the statements themselves.
+ StmtComparer Comparer(Context);
+ if (!Comparer.IsEquivalent(S1, S2))
+ return false;
+
+ // Iterate over the children of both statements and also compare them.
+ for (auto Pair : zip_longest(S1->children(), S2->children())) {
+ Optional<const Stmt *> Child1 = std::get<0>(Pair);
+ Optional<const Stmt *> Child2 = std::get<1>(Pair);
+    // One of the statements has a different number of children than the other,
+ // so the statements can't be equivalent.
+ if (!Child1 || !Child2)
+ return false;
+ if (!IsStructurallyEquivalent(Context, *Child1, *Child2))
return false;
- return IsStructurallyEquivalent(Context, CastE1->getSubExpr(),
- CastE2->getSubExpr());
}
- // FIXME: Handle other kind of expressions!
return true;
}
@@ -1047,48 +1274,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
}
- if (Field1->isBitField() != Field2->isBitField()) {
- if (Context.Complain) {
- Context.Diag2(
- Owner2->getLocation(),
- Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(Owner2);
- if (Field1->isBitField()) {
- Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
- << Field1->getDeclName() << Field1->getType()
- << Field1->getBitWidthValue(Context.FromCtx);
- Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field)
- << Field2->getDeclName();
- } else {
- Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
- << Field2->getDeclName() << Field2->getType()
- << Field2->getBitWidthValue(Context.ToCtx);
- Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field)
- << Field1->getDeclName();
- }
- }
- return false;
- }
-
- if (Field1->isBitField()) {
- // Make sure that the bit-fields are the same length.
- unsigned Bits1 = Field1->getBitWidthValue(Context.FromCtx);
- unsigned Bits2 = Field2->getBitWidthValue(Context.ToCtx);
-
- if (Bits1 != Bits2) {
- if (Context.Complain) {
- Context.Diag2(Owner2->getLocation(),
- Context.getApplicableDiagnostic(
- diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(Owner2);
- Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
- << Field2->getDeclName() << Field2->getType() << Bits2;
- Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
- << Field1->getDeclName() << Field1->getType() << Bits1;
- }
- return false;
- }
- }
+ if (Field1->isBitField())
+ return IsStructurallyEquivalent(Context, Field1->getBitWidth(),
+ Field2->getBitWidth());
return true;
}
@@ -1161,6 +1349,17 @@ IsStructurallyEquivalentLambdas(StructuralEquivalenceContext &Context,
/// Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
+
+ // Check for equivalent structure names.
+ IdentifierInfo *Name1 = D1->getIdentifier();
+ if (!Name1 && D1->getTypedefNameForAnonDecl())
+ Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = D2->getIdentifier();
+ if (!Name2 && D2->getTypedefNameForAnonDecl())
+ Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!IsStructurallyEquivalent(Name1, Name2))
+ return false;
+
if (D1->isUnion() != D2->isUnion()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
@@ -1395,6 +1594,16 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
EnumDecl *D1, EnumDecl *D2) {
+ // Check for equivalent enum names.
+ IdentifierInfo *Name1 = D1->getIdentifier();
+ if (!Name1 && D1->getTypedefNameForAnonDecl())
+ Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = D2->getIdentifier();
+ if (!Name2 && D2->getTypedefNameForAnonDecl())
+ Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!IsStructurallyEquivalent(Name1, Name2))
+ return false;
+
// Compare the definitions of these two enums. If either or both are
// incomplete (i.e. forward declared), we assume that they are equivalent.
D1 = D1->getDefinition();
@@ -1621,7 +1830,26 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TypedefNameDecl *D1, TypedefNameDecl *D2) {
+ if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier()))
+ return false;
+
+ return IsStructurallyEquivalent(Context, D1->getUnderlyingType(),
+ D2->getUnderlyingType());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
FunctionDecl *D1, FunctionDecl *D2) {
+ if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier()))
+ return false;
+
+ if (D1->isOverloadedOperator()) {
+ if (!D2->isOverloadedOperator())
+ return false;
+ if (D1->getOverloadedOperator() != D2->getOverloadedOperator())
+ return false;
+ }
+
// FIXME: Consider checking for function attributes as well.
if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType()))
return false;
@@ -1790,6 +2018,15 @@ bool StructuralEquivalenceContext::IsEquivalent(QualType T1, QualType T2) {
return !Finish();
}
+bool StructuralEquivalenceContext::IsEquivalent(Stmt *S1, Stmt *S2) {
+ assert(DeclsToCheck.empty());
+ assert(VisitedDecls.empty());
+ if (!::IsStructurallyEquivalent(*this, S1, S2))
+ return false;
+
+ return !Finish();
+}
+
bool StructuralEquivalenceContext::CheckCommonEquivalence(Decl *D1, Decl *D2) {
// Check for equivalent described template.
TemplateDecl *Template1 = D1->getDescribedTemplate();
@@ -1806,136 +2043,21 @@ bool StructuralEquivalenceContext::CheckCommonEquivalence(Decl *D1, Decl *D2) {
bool StructuralEquivalenceContext::CheckKindSpecificEquivalence(
Decl *D1, Decl *D2) {
- // FIXME: Switch on all declaration kinds. For now, we're just going to
- // check the obvious ones.
- if (auto *Record1 = dyn_cast<RecordDecl>(D1)) {
- if (auto *Record2 = dyn_cast<RecordDecl>(D2)) {
- // Check for equivalent structure names.
- IdentifierInfo *Name1 = Record1->getIdentifier();
- if (!Name1 && Record1->getTypedefNameForAnonDecl())
- Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = Record2->getIdentifier();
- if (!Name2 && Record2->getTypedefNameForAnonDecl())
- Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!::IsStructurallyEquivalent(Name1, Name2) ||
- !::IsStructurallyEquivalent(*this, Record1, Record2))
- return false;
- } else {
- // Record/non-record mismatch.
- return false;
- }
- } else if (auto *Enum1 = dyn_cast<EnumDecl>(D1)) {
- if (auto *Enum2 = dyn_cast<EnumDecl>(D2)) {
- // Check for equivalent enum names.
- IdentifierInfo *Name1 = Enum1->getIdentifier();
- if (!Name1 && Enum1->getTypedefNameForAnonDecl())
- Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = Enum2->getIdentifier();
- if (!Name2 && Enum2->getTypedefNameForAnonDecl())
- Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!::IsStructurallyEquivalent(Name1, Name2) ||
- !::IsStructurallyEquivalent(*this, Enum1, Enum2))
- return false;
- } else {
- // Enum/non-enum mismatch
- return false;
- }
- } else if (const auto *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
- if (const auto *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
- if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
- Typedef2->getIdentifier()) ||
- !::IsStructurallyEquivalent(*this, Typedef1->getUnderlyingType(),
- Typedef2->getUnderlyingType()))
- return false;
- } else {
- // Typedef/non-typedef mismatch.
- return false;
- }
- } else if (auto *ClassTemplate1 = dyn_cast<ClassTemplateDecl>(D1)) {
- if (auto *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, ClassTemplate1,
- ClassTemplate2))
- return false;
- } else {
- // Class template/non-class-template mismatch.
- return false;
- }
- } else if (auto *FunctionTemplate1 = dyn_cast<FunctionTemplateDecl>(D1)) {
- if (auto *FunctionTemplate2 = dyn_cast<FunctionTemplateDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, FunctionTemplate1,
- FunctionTemplate2))
- return false;
- } else {
- // Class template/non-class-template mismatch.
- return false;
- }
- } else if (auto *ConceptDecl1 = dyn_cast<ConceptDecl>(D1)) {
- if (auto *ConceptDecl2 = dyn_cast<ConceptDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, ConceptDecl1, ConceptDecl2))
- return false;
- } else {
- // Concept/non-concept mismatch.
- return false;
- }
- } else if (auto *TTP1 = dyn_cast<TemplateTypeParmDecl>(D1)) {
- if (auto *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
- return false;
- } else {
- // Kind mismatch.
- return false;
- }
- } else if (auto *NTTP1 = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
- if (auto *NTTP2 = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
- return false;
- } else {
- // Kind mismatch.
- return false;
- }
- } else if (auto *TTP1 = dyn_cast<TemplateTemplateParmDecl>(D1)) {
- if (auto *TTP2 = dyn_cast<TemplateTemplateParmDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
- return false;
- } else {
- // Kind mismatch.
- return false;
- }
- } else if (auto *MD1 = dyn_cast<CXXMethodDecl>(D1)) {
- if (auto *MD2 = dyn_cast<CXXMethodDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, MD1, MD2))
- return false;
- } else {
- // Kind mismatch.
- return false;
- }
- } else if (FunctionDecl *FD1 = dyn_cast<FunctionDecl>(D1)) {
- if (FunctionDecl *FD2 = dyn_cast<FunctionDecl>(D2)) {
- if (FD1->isOverloadedOperator()) {
- if (!FD2->isOverloadedOperator())
- return false;
- if (FD1->getOverloadedOperator() != FD2->getOverloadedOperator())
- return false;
- }
- if (!::IsStructurallyEquivalent(FD1->getIdentifier(),
- FD2->getIdentifier()))
- return false;
- if (!::IsStructurallyEquivalent(*this, FD1, FD2))
- return false;
- } else {
- // Kind mismatch.
- return false;
- }
- } else if (FriendDecl *FrD1 = dyn_cast<FriendDecl>(D1)) {
- if (FriendDecl *FrD2 = dyn_cast<FriendDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, FrD1, FrD2))
- return false;
- } else {
- // Kind mismatch.
- return false;
- }
- }
+ // Kind mismatch.
+ if (D1->getKind() != D2->getKind())
+ return false;
+
+ // Cast the Decls to their actual subclass so that the right overload of
+ // IsStructurallyEquivalent is called.
+ switch (D1->getKind()) {
+#define ABSTRACT_DECL(DECL)
+#define DECL(DERIVED, BASE) \
+ case Decl::Kind::DERIVED: \
+ return ::IsStructurallyEquivalent(*this, static_cast<DERIVED##Decl *>(D1), \
+ static_cast<DERIVED##Decl *>(D2));
+#include "clang/AST/DeclNodes.inc"
+ }
return true;
}
diff --git a/clang/lib/AST/ASTTypeTraits.cpp b/clang/lib/AST/ASTTypeTraits.cpp
index 34fc587694be..8f9ceea656a3 100644
--- a/clang/lib/AST/ASTTypeTraits.cpp
+++ b/clang/lib/AST/ASTTypeTraits.cpp
@@ -21,27 +21,29 @@
using namespace clang;
const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
- { NKI_None, "<None>" },
- { NKI_None, "TemplateArgument" },
- { NKI_None, "TemplateName" },
- { NKI_None, "NestedNameSpecifierLoc" },
- { NKI_None, "QualType" },
- { NKI_None, "TypeLoc" },
- { NKI_None, "CXXBaseSpecifier" },
- { NKI_None, "CXXCtorInitializer" },
- { NKI_None, "NestedNameSpecifier" },
- { NKI_None, "Decl" },
+ {NKI_None, "<None>"},
+ {NKI_None, "TemplateArgument"},
+ {NKI_None, "TemplateArgumentLoc"},
+ {NKI_None, "TemplateName"},
+ {NKI_None, "NestedNameSpecifierLoc"},
+ {NKI_None, "QualType"},
+ {NKI_None, "TypeLoc"},
+ {NKI_None, "CXXBaseSpecifier"},
+ {NKI_None, "CXXCtorInitializer"},
+ {NKI_None, "NestedNameSpecifier"},
+ {NKI_None, "Decl"},
#define DECL(DERIVED, BASE) { NKI_##BASE, #DERIVED "Decl" },
#include "clang/AST/DeclNodes.inc"
- { NKI_None, "Stmt" },
+ {NKI_None, "Stmt"},
#define STMT(DERIVED, BASE) { NKI_##BASE, #DERIVED },
#include "clang/AST/StmtNodes.inc"
- { NKI_None, "Type" },
+ {NKI_None, "Type"},
#define TYPE(DERIVED, BASE) { NKI_##BASE, #DERIVED "Type" },
#include "clang/AST/TypeNodes.inc"
- { NKI_None, "OMPClause" },
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) {NKI_OMPClause, #Class},
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ {NKI_None, "OMPClause"},
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) {NKI_OMPClause, #Class},
+#include "llvm/Frontend/OpenMP/OMP.inc"
};
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
@@ -61,6 +63,17 @@ bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
return Derived == Base;
}
+ASTNodeKind ASTNodeKind::getCladeKind() const {
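+  // Walk up the parent chain; the last kind before NKI_None is the root of
+  // the clade this kind belongs to (Decl, Stmt, Type, ...).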
+ NodeKindId LastId = KindId;
+ while (LastId) {
+ NodeKindId ParentId = AllKindInfo[LastId].ParentId;
+ if (ParentId == NKI_None)
+ return LastId;
+ LastId = ParentId;
+ }
+ return NKI_None;
+}
+
StringRef ASTNodeKind::asStringRef() const { return AllKindInfo[KindId].Name; }
ASTNodeKind ASTNodeKind::getMostDerivedType(ASTNodeKind Kind1,
@@ -112,15 +125,14 @@ ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) {
switch (C.getClauseKind()) {
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) \
case llvm::omp::Clause::Enum: \
return ASTNodeKind(NKI_##Class);
-#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
+#define CLAUSE_NO_CLASS(Enum, Str) \
case llvm::omp::Clause::Enum: \
llvm_unreachable("unexpected OpenMP clause kind");
- default:
- break;
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMP.inc"
}
llvm_unreachable("invalid stmt kind");
}
@@ -129,6 +141,8 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
const PrintingPolicy &PP) const {
if (const TemplateArgument *TA = get<TemplateArgument>())
TA->print(PP, OS);
+ else if (const TemplateArgumentLoc *TAL = get<TemplateArgumentLoc>())
+ TAL->getArgument().print(PP, OS);
else if (const TemplateName *TN = get<TemplateName>())
TN->print(OS, PP);
else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>())
@@ -175,7 +189,11 @@ SourceRange DynTypedNode::getSourceRange() const {
return D->getSourceRange();
if (const Stmt *S = get<Stmt>())
return S->getSourceRange();
+ if (const TemplateArgumentLoc *TAL = get<TemplateArgumentLoc>())
+ return TAL->getSourceRange();
if (const auto *C = get<OMPClause>())
return SourceRange(C->getBeginLoc(), C->getEndLoc());
+ if (const auto *CBS = get<CXXBaseSpecifier>())
+ return CBS->getSourceRange();
return SourceRange();
}
diff --git a/clang/lib/AST/AttrImpl.cpp b/clang/lib/AST/AttrImpl.cpp
index 7818fbb1918b..09fdca67995f 100644
--- a/clang/lib/AST/AttrImpl.cpp
+++ b/clang/lib/AST/AttrImpl.cpp
@@ -42,7 +42,16 @@ std::string LoopHintAttr::getValueString(const PrintingPolicy &Policy) const {
OS << "(";
if (state == Numeric)
value->printPretty(OS, nullptr, Policy);
- else if (state == Enable)
+ else if (state == FixedWidth || state == ScalableWidth) {
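+    // Print the explicit width argument if one was given, marking scalable
+    // widths; a bare "fixed" or "scalable" is printed when no width is known.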
+ if (value) {
+ value->printPretty(OS, nullptr, Policy);
+ if (state == ScalableWidth)
+ OS << ", scalable";
+ } else if (state == ScalableWidth)
+ OS << "scalable";
+ else
+ OS << "fixed";
+ } else if (state == Enable)
OS << "enable";
else if (state == Full)
OS << "full";
@@ -136,8 +145,16 @@ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
if (!VD->hasAttrs())
return llvm::None;
- if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
- return Attr->getMapType();
+ unsigned Level = 0;
+ const OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
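+  // A declaration can carry several declare target attributes; pick the one
+  // with the highest nesting level.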
+ for (const auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
+ if (Level < Attr->getLevel()) {
+ Level = Attr->getLevel();
+ FoundAttr = Attr;
+ }
+ }
+ if (FoundAttr)
+ return FoundAttr->getMapType();
return llvm::None;
}
@@ -146,8 +163,34 @@ llvm::Optional<OMPDeclareTargetDeclAttr::DevTypeTy>
OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) {
if (!VD->hasAttrs())
return llvm::None;
- if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
- return Attr->getDevType();
+ unsigned Level = 0;
+ const OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
+ for (const auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
+ if (Level < Attr->getLevel()) {
+ Level = Attr->getLevel();
+ FoundAttr = Attr;
+ }
+ }
+ if (FoundAttr)
+ return FoundAttr->getDevType();
+
+ return llvm::None;
+}
+
+llvm::Optional<SourceLocation>
+OMPDeclareTargetDeclAttr::getLocation(const ValueDecl *VD) {
+ if (!VD->hasAttrs())
+ return llvm::None;
+ unsigned Level = 0;
+ const OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
+ for (const auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
+ if (Level < Attr->getLevel()) {
+ Level = Attr->getLevel();
+ FoundAttr = Attr;
+ }
+ }
+ if (FoundAttr)
+ return FoundAttr->getRange().getBegin();
return llvm::None;
}
diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp
index 8af97119e3cf..c87bcf31d120 100644
--- a/clang/lib/AST/CXXInheritance.cpp
+++ b/clang/lib/AST/CXXInheritance.cpp
@@ -33,29 +33,6 @@
using namespace clang;
-/// Computes the set of declarations referenced by these base
-/// paths.
-void CXXBasePaths::ComputeDeclsFound() {
- assert(NumDeclsFound == 0 && !DeclsFound &&
- "Already computed the set of declarations");
-
- llvm::SmallSetVector<NamedDecl *, 8> Decls;
- for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
- Decls.insert(Path->Decls.front());
-
- NumDeclsFound = Decls.size();
- DeclsFound = std::make_unique<NamedDecl *[]>(NumDeclsFound);
- std::copy(Decls.begin(), Decls.end(), DeclsFound.get());
-}
-
-CXXBasePaths::decl_range CXXBasePaths::found_decls() {
- if (NumDeclsFound == 0)
- ComputeDeclsFound();
-
- return decl_range(decl_iterator(DeclsFound.get()),
- decl_iterator(DeclsFound.get() + NumDeclsFound));
-}
-
/// isAmbiguous - Determines whether the set of paths provided is
/// ambiguous, i.e., there are two or more paths that refer to
/// different base class subobjects of the same type. BaseType must be
@@ -402,54 +379,45 @@ bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
->getCanonicalDecl() == BaseRecord;
}
-bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- DeclarationName Name) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
-
- for (Path.Decls = BaseRecord->lookup(Name);
- !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- if (Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag))
- return true;
- }
-
- return false;
+static bool isOrdinaryMember(const NamedDecl *ND) {
+ return ND->isInIdentifierNamespace(Decl::IDNS_Ordinary | Decl::IDNS_Tag |
+ Decl::IDNS_Member);
}
-static bool findOrdinaryMember(RecordDecl *BaseRecord, CXXBasePath &Path,
+static bool findOrdinaryMember(const CXXRecordDecl *RD, CXXBasePath &Path,
DeclarationName Name) {
- const unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag |
- Decl::IDNS_Member;
- for (Path.Decls = BaseRecord->lookup(Name);
- !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- if (Path.Decls.front()->isInIdentifierNamespace(IDNS))
+ Path.Decls = RD->lookup(Name);
+ for (NamedDecl *ND : Path.Decls)
+ if (isOrdinaryMember(ND))
return true;
- }
return false;
}
-bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- DeclarationName Name) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
- return findOrdinaryMember(BaseRecord, Path, Name);
+bool CXXRecordDecl::hasMemberName(DeclarationName Name) const {
+ CXXBasePath P;
+ if (findOrdinaryMember(this, P, Name))
+ return true;
+
+ CXXBasePaths Paths(false, false, false);
+ return lookupInBases(
+ [Name](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return findOrdinaryMember(Specifier->getType()->getAsCXXRecordDecl(),
+ Path, Name);
+ },
+ Paths);
}
-bool CXXRecordDecl::FindOrdinaryMemberInDependentClasses(
- const CXXBaseSpecifier *Specifier, CXXBasePath &Path,
- DeclarationName Name) {
+static bool
+findOrdinaryMemberInDependentClasses(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path, DeclarationName Name) {
const TemplateSpecializationType *TST =
Specifier->getType()->getAs<TemplateSpecializationType>();
if (!TST) {
auto *RT = Specifier->getType()->getAs<RecordType>();
if (!RT)
return false;
- return findOrdinaryMember(RT->getDecl(), Path, Name);
+ return findOrdinaryMember(cast<CXXRecordDecl>(RT->getDecl()), Path, Name);
}
TemplateName TN = TST->getTemplateName();
const auto *TD = dyn_cast_or_null<ClassTemplateDecl>(TN.getAsTemplateDecl());
@@ -461,80 +429,32 @@ bool CXXRecordDecl::FindOrdinaryMemberInDependentClasses(
return findOrdinaryMember(RD, Path, Name);
}
-bool CXXRecordDecl::FindOMPReductionMember(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- DeclarationName Name) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
-
- for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- if (Path.Decls.front()->isInIdentifierNamespace(IDNS_OMPReduction))
- return true;
- }
-
- return false;
-}
-
-bool CXXRecordDecl::FindOMPMapperMember(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- DeclarationName Name) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
-
- for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- if (Path.Decls.front()->isInIdentifierNamespace(IDNS_OMPMapper))
- return true;
- }
-
- return false;
-}
-
-bool CXXRecordDecl::
-FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- DeclarationName Name) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
-
- for (Path.Decls = BaseRecord->lookup(Name);
- !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- // FIXME: Refactor the "is it a nested-name-specifier?" check
- if (isa<TypedefNameDecl>(Path.Decls.front()) ||
- Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag))
- return true;
- }
-
- return false;
-}
-
std::vector<const NamedDecl *> CXXRecordDecl::lookupDependentName(
- const DeclarationName &Name,
+ DeclarationName Name,
llvm::function_ref<bool(const NamedDecl *ND)> Filter) {
std::vector<const NamedDecl *> Results;
// Lookup in the class.
- DeclContext::lookup_result DirectResult = lookup(Name);
- if (!DirectResult.empty()) {
- for (const NamedDecl *ND : DirectResult) {
- if (Filter(ND))
- Results.push_back(ND);
- }
- return Results;
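+  // Results from the class itself hide base-class members only if at least
+  // one of them is an ordinary member; otherwise the bases are searched too.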
+ bool AnyOrdinaryMembers = false;
+ for (const NamedDecl *ND : lookup(Name)) {
+ if (isOrdinaryMember(ND))
+ AnyOrdinaryMembers = true;
+ if (Filter(ND))
+ Results.push_back(ND);
}
+ if (AnyOrdinaryMembers)
+ return Results;
+
// Perform lookup into our base classes.
CXXBasePaths Paths;
Paths.setOrigin(this);
if (!lookupInBases(
[&](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- return CXXRecordDecl::FindOrdinaryMemberInDependentClasses(
- Specifier, Path, Name);
+ return findOrdinaryMemberInDependentClasses(Specifier, Path, Name);
},
Paths, /*LookupInDependent=*/true))
return Results;
for (const NamedDecl *ND : Paths.front().Decls) {
- if (Filter(ND))
+ if (isOrdinaryMember(ND) && Filter(ND))
Results.push_back(ND);
}
return Results;
diff --git a/clang/lib/AST/CommentLexer.cpp b/clang/lib/AST/CommentLexer.cpp
index c1ea3eab075e..4bebd41e15ee 100644
--- a/clang/lib/AST/CommentLexer.cpp
+++ b/clang/lib/AST/CommentLexer.cpp
@@ -740,12 +740,11 @@ void Lexer::lexHTMLEndTag(Token &T) {
Lexer::Lexer(llvm::BumpPtrAllocator &Allocator, DiagnosticsEngine &Diags,
const CommandTraits &Traits, SourceLocation FileLoc,
- const char *BufferStart, const char *BufferEnd,
- bool ParseCommands)
+ const char *BufferStart, const char *BufferEnd, bool ParseCommands)
: Allocator(Allocator), Diags(Diags), Traits(Traits),
- BufferStart(BufferStart), BufferEnd(BufferEnd), FileLoc(FileLoc),
- BufferPtr(BufferStart), CommentState(LCS_BeforeComment), State(LS_Normal),
- ParseCommands(ParseCommands) {}
+ BufferStart(BufferStart), BufferEnd(BufferEnd), BufferPtr(BufferStart),
+ FileLoc(FileLoc), ParseCommands(ParseCommands),
+ CommentState(LCS_BeforeComment), State(LS_Normal) {}
void Lexer::lex(Token &T) {
again:
diff --git a/clang/lib/AST/ComparisonCategories.cpp b/clang/lib/AST/ComparisonCategories.cpp
index 6b6826c02a12..896050482644 100644
--- a/clang/lib/AST/ComparisonCategories.cpp
+++ b/clang/lib/AST/ComparisonCategories.cpp
@@ -42,7 +42,7 @@ clang::getComparisonCategoryForBuiltinCmp(QualType T) {
bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const {
assert(VD && "must have var decl");
- if (!VD->checkInitIsICE())
+ if (!VD->isUsableInConstantExpressions(VD->getASTContext()))
return false;
// Before we attempt to get the value of the first field, ensure that we
diff --git a/clang/lib/AST/ComputeDependence.cpp b/clang/lib/AST/ComputeDependence.cpp
index 2333993dbeb4..4026fdc76fd6 100644
--- a/clang/lib/AST/ComputeDependence.cpp
+++ b/clang/lib/AST/ComputeDependence.cpp
@@ -37,9 +37,39 @@ ExprDependence clang::computeDependence(ParenExpr *E) {
return E->getSubExpr()->getDependence();
}
-ExprDependence clang::computeDependence(UnaryOperator *E) {
- return toExprDependence(E->getType()->getDependence()) |
- E->getSubExpr()->getDependence();
+ExprDependence clang::computeDependence(UnaryOperator *E,
+ const ASTContext &Ctx) {
+ ExprDependence Dep = toExprDependence(E->getType()->getDependence()) |
+ E->getSubExpr()->getDependence();
+
+ // C++ [temp.dep.constexpr]p5:
+ // An expression of the form & qualified-id where the qualified-id names a
+ // dependent member of the current instantiation is value-dependent. An
+ // expression of the form & cast-expression is also value-dependent if
+ // evaluating cast-expression as a core constant expression succeeds and
+ // the result of the evaluation refers to a templated entity that is an
+ // object with static or thread storage duration or a member function.
+ //
+ // What this amounts to is: constant-evaluate the operand and check whether it
+ // refers to a templated entity other than a variable with local storage.
+ if (Ctx.getLangOpts().CPlusPlus && E->getOpcode() == UO_AddrOf &&
+ !(Dep & ExprDependence::Value)) {
+ Expr::EvalResult Result;
+ SmallVector<PartialDiagnosticAt, 8> Diag;
+ Result.Diag = &Diag;
+ // FIXME: This doesn't enforce the C++98 constant expression rules.
+ if (E->getSubExpr()->EvaluateAsConstantExpr(Result, Ctx) && Diag.empty() &&
+ Result.Val.isLValue()) {
+ auto *VD = Result.Val.getLValueBase().dyn_cast<const ValueDecl *>();
+ if (VD && VD->isTemplated()) {
+ auto *VarD = dyn_cast<VarDecl>(VD);
+ if (!VarD || !VarD->hasLocalStorage())
+ Dep |= ExprDependence::Value;
+ }
+ }
+ }
+
+ return Dep;
}
ExprDependence clang::computeDependence(UnaryExprOrTypeTraitExpr *E) {
@@ -423,22 +453,21 @@ ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
Deps |= ExprDependence::UnexpandedPack;
Deps |= toExprDependence(Type->getDependence()) & ExprDependence::Error;
- // (TD) C++ [temp.dep.expr]p3:
+ // C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
- //
- // and
- //
- // (VD) C++ [temp.dep.constexpr]p2:
- // An identifier is value-dependent if it is:
- // (TD) - an identifier that was declared with dependent type
- // (VD) - a name declared with a dependent type,
+ // - an identifier associated by name lookup with one or more declarations
+ // declared with a dependent type
+ //
+ // [The "or more" case is not modeled as a DeclRefExpr. There are a bunch
+ // more bullets here that we handle by treating the declaration as having a
+ // dependent type if they involve a placeholder type that can't be deduced.]
if (Type->isDependentType())
return Deps | ExprDependence::TypeValueInstantiation;
else if (Type->isInstantiationDependentType())
Deps |= ExprDependence::Instantiation;
- // (TD) - a conversion-function-id that specifies a dependent type
+ // - a conversion-function-id that specifies a dependent type
if (Decl->getDeclName().getNameKind() ==
DeclarationName::CXXConversionFunctionName) {
QualType T = Decl->getDeclName().getCXXNameType();
@@ -449,48 +478,65 @@ ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
Deps |= ExprDependence::Instantiation;
}
- // (VD) - the name of a non-type template parameter,
+ // - a template-id that is dependent,
+ // - a nested-name-specifier or a qualified-id that names a member of an
+ // unknown specialization
+ // [These are not modeled as DeclRefExprs.]
+
+ // or if it names a dependent member of the current instantiation that is a
+ // static data member of type "array of unknown bound of T" for some T
+ // [handled below].
+
+ // C++ [temp.dep.constexpr]p2:
+ // An id-expression is value-dependent if:
+
+ // - it is type-dependent [handled above]
+
+ // - it is the name of a non-type template parameter,
if (isa<NonTypeTemplateParmDecl>(Decl))
return Deps | ExprDependence::ValueInstantiation;
- // (VD) - a constant with integral or enumeration type and is
- // initialized with an expression that is value-dependent.
- // (VD) - a constant with literal type and is initialized with an
- // expression that is value-dependent [C++11].
- // (VD) - FIXME: Missing from the standard:
- // - an entity with reference type and is initialized with an
- // expression that is value-dependent [C++11]
- if (VarDecl *Var = dyn_cast<VarDecl>(Decl)) {
- if ((Ctx.getLangOpts().CPlusPlus11
- ? Var->getType()->isLiteralType(Ctx)
- : Var->getType()->isIntegralOrEnumerationType()) &&
- (Var->getType().isConstQualified() ||
- Var->getType()->isReferenceType())) {
- if (const Expr *Init = Var->getAnyInitializer())
- if (Init->isValueDependent()) {
+ // - it names a potentially-constant variable that is initialized with an
+ // expression that is value-dependent
+ if (const auto *Var = dyn_cast<VarDecl>(Decl)) {
+ if (Var->mightBeUsableInConstantExpressions(Ctx)) {
+ if (const Expr *Init = Var->getAnyInitializer()) {
+ if (Init->isValueDependent())
Deps |= ExprDependence::ValueInstantiation;
- }
+ if (Init->containsErrors())
+ Deps |= ExprDependence::Error;
+ }
}
- // (VD) - FIXME: Missing from the standard:
- // - a member function or a static data member of the current
- // instantiation
+ // - it names a static data member that is a dependent member of the
+ // current instantiation and is not initialized in a member-declarator,
if (Var->isStaticDataMember() &&
- Var->getDeclContext()->isDependentContext()) {
- Deps |= ExprDependence::ValueInstantiation;
- TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo();
- if (TInfo->getType()->isIncompleteArrayType())
- Deps |= ExprDependence::Type;
+ Var->getDeclContext()->isDependentContext() &&
+ !Var->getFirstDecl()->hasInit()) {
+ const VarDecl *First = Var->getFirstDecl();
+ TypeSourceInfo *TInfo = First->getTypeSourceInfo();
+ if (TInfo->getType()->isIncompleteArrayType()) {
+ Deps |= ExprDependence::TypeValueInstantiation;
+ } else if (!First->hasInit()) {
+ Deps |= ExprDependence::ValueInstantiation;
+ }
}
return Deps;
}
- // (VD) - FIXME: Missing from the standard:
- // - a member function or a static data member of the current
- // instantiation
- if (isa<CXXMethodDecl>(Decl) && Decl->getDeclContext()->isDependentContext())
- Deps |= ExprDependence::ValueInstantiation;
+ // - it names a static member function that is a dependent member of the
+ // current instantiation
+ //
+ // FIXME: It's unclear that the restriction to static members here has any
+ // effect: any use of a non-static member function name requires either
+ // forming a pointer-to-member or providing an object parameter, either of
+ // which makes the overall expression value-dependent.
+ if (auto *MD = dyn_cast<CXXMethodDecl>(Decl)) {
+ if (MD->isStatic() && Decl->getDeclContext()->isDependentContext())
+ Deps |= ExprDependence::ValueInstantiation;
+ }
+
return Deps;
}
@@ -502,7 +548,7 @@ ExprDependence clang::computeDependence(RecoveryExpr *E) {
// dependent type), or the type is known and dependent, or it has
// type-dependent subexpressions.
auto D = toExprDependence(E->getType()->getDependence()) |
- ExprDependence::ValueInstantiation | ExprDependence::Error;
+ ExprDependence::ErrorDependent;
// FIXME: remove the type-dependent bit from subexpressions, if the
// RecoveryExpr has a non-dependent type.
for (auto *S : E->subExpressions())
@@ -694,6 +740,10 @@ ExprDependence clang::computeDependence(CXXConstructExpr *E) {
return D;
}
+ExprDependence clang::computeDependence(CXXDefaultInitExpr *E) {
+ return E->getExpr()->getDependence();
+}
+
ExprDependence clang::computeDependence(LambdaExpr *E,
bool ContainsUnexpandedParameterPack) {
auto D = toExprDependence(E->getType()->getDependence());
@@ -705,8 +755,6 @@ ExprDependence clang::computeDependence(LambdaExpr *E,
ExprDependence clang::computeDependence(CXXUnresolvedConstructExpr *E) {
auto D = ExprDependence::ValueInstantiation;
D |= toExprDependence(E->getType()->getDependence());
- if (E->getType()->getContainedDeducedType())
- D |= ExprDependence::Type;
for (auto *A : E->arguments())
D |= A->getDependence() &
(ExprDependence::UnexpandedPack | ExprDependence::Error);
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 5c0a98815dd7..feb9b0645ebc 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -1299,7 +1299,8 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
// we should not make static local variables in the function hidden.
LV = getLVForDecl(FD, computation);
if (isa<VarDecl>(D) && useInlineVisibilityHidden(FD) &&
- !LV.isVisibilityExplicit()) {
+ !LV.isVisibilityExplicit() &&
+ !Context.getLangOpts().VisibilityInlinesHiddenStaticLocalVar) {
assert(cast<VarDecl>(D)->isStaticLocal());
// If this was an implicitly hidden inline method, check again for
// explicit visibility on the parent class, and use that for static locals
@@ -1394,6 +1395,15 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
break;
}
+
+ case Decl::TemplateParamObject: {
+ // The template parameter object can be referenced from anywhere its type
+ // and value can be referenced.
+ auto *TPO = cast<TemplateParamObjectDecl>(D);
+ LinkageInfo LV = getLVForType(*TPO->getType(), computation);
+ LV.merge(getLVForValue(TPO->getValue(), computation));
+ return LV;
+ }
}
// Handle linkage for namespace-scope names.
@@ -1590,21 +1600,37 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
ContextsTy Contexts;
// Collect named contexts.
- while (Ctx) {
- if (isa<NamedDecl>(Ctx))
- Contexts.push_back(Ctx);
- Ctx = Ctx->getParent();
+ DeclarationName NameInScope = getDeclName();
+ for (; Ctx; Ctx = Ctx->getParent()) {
+ // Suppress anonymous namespace if requested.
+ if (P.SuppressUnwrittenScope && isa<NamespaceDecl>(Ctx) &&
+ cast<NamespaceDecl>(Ctx)->isAnonymousNamespace())
+ continue;
+
+ // Suppress inline namespace if it doesn't make the result ambiguous.
+ if (P.SuppressInlineNamespace && Ctx->isInlineNamespace() && NameInScope &&
+ Ctx->lookup(NameInScope).size() ==
+ Ctx->getParent()->lookup(NameInScope).size())
+ continue;
+
+ // Skip non-named contexts such as linkage specifications and ExportDecls.
+ const NamedDecl *ND = dyn_cast<NamedDecl>(Ctx);
+ if (!ND)
+ continue;
+
+ Contexts.push_back(Ctx);
+ NameInScope = ND->getDeclName();
}
- for (const DeclContext *DC : llvm::reverse(Contexts)) {
+ for (unsigned I = Contexts.size(); I != 0; --I) {
+ const DeclContext *DC = Contexts[I - 1];
if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
OS << Spec->getName();
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- printTemplateArgumentList(OS, TemplateArgs.asArray(), P);
+ printTemplateArgumentList(
+ OS, TemplateArgs.asArray(), P,
+ Spec->getSpecializedTemplate()->getTemplateParameters());
} else if (const auto *ND = dyn_cast<NamespaceDecl>(DC)) {
- if (P.SuppressUnwrittenScope &&
- (ND->isAnonymousNamespace() || ND->isInline()))
- continue;
if (ND->isAnonymousNamespace()) {
OS << (P.MSVCFormatting ? "`anonymous namespace\'"
: "(anonymous namespace)");
@@ -1797,8 +1823,7 @@ template <typename DeclT>
static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) {
if (decl->getNumTemplateParameterLists() > 0)
return decl->getTemplateParameterList(0)->getTemplateLoc();
- else
- return decl->getInnerLocStart();
+ return decl->getInnerLocStart();
}
SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
@@ -2106,10 +2131,9 @@ VarDecl::isThisDeclarationADefinition(ASTContext &C) const {
TSK_ExplicitSpecialization) ||
isa<VarTemplatePartialSpecializationDecl>(this)))
return Definition;
- else if (!isOutOfLine() && isInline())
+ if (!isOutOfLine() && isInline())
return Definition;
- else
- return DeclarationOnly;
+ return DeclarationOnly;
}
// C99 6.7p5:
// A definition of an identifier is a declaration for that identifier that
@@ -2172,7 +2196,7 @@ VarDecl *VarDecl::getActingDefinition() {
Kind = I->isThisDeclarationADefinition();
if (Kind == Definition)
return nullptr;
- else if (Kind == TentativeDefinition)
+ if (Kind == TentativeDefinition)
LastTentative = I;
}
return LastTentative;
@@ -2244,8 +2268,7 @@ VarDecl *VarDecl::getInitializingDeclaration() {
if (I->isThisDeclarationADefinition()) {
if (isStaticDataMember())
return I;
- else
- Def = I;
+ Def = I;
}
}
return Def;
@@ -2276,16 +2299,22 @@ void VarDecl::setInit(Expr *I) {
Init = I;
}
-bool VarDecl::mightBeUsableInConstantExpressions(ASTContext &C) const {
+bool VarDecl::mightBeUsableInConstantExpressions(const ASTContext &C) const {
const LangOptions &Lang = C.getLangOpts();
- if (!Lang.CPlusPlus)
+ // OpenCL permits const integral variables to be used in constant
+ // expressions, like in C++98.
+ if (!Lang.CPlusPlus && !Lang.OpenCL)
return false;
// Function parameters are never usable in constant expressions.
if (isa<ParmVarDecl>(this))
return false;
+ // The values of weak variables are never usable in constant expressions.
+ if (isWeak())
+ return false;
+
// In C++11, any variable of reference type can be used in a constant
// expression if it is initialized by a constant expression.
if (Lang.CPlusPlus11 && getType()->isReferenceType())
@@ -2294,7 +2323,7 @@ bool VarDecl::mightBeUsableInConstantExpressions(ASTContext &C) const {
// Only const objects can be used in constant expressions in C++. C++98 does
// not require the variable to be non-volatile, but we consider this to be a
// defect.
- if (!getType().isConstQualified() || getType().isVolatileQualified())
+ if (!getType().isConstant(C) || getType().isVolatileQualified())
return false;
// In C++, const, non-volatile variables of integral or enumeration types
@@ -2307,7 +2336,7 @@ bool VarDecl::mightBeUsableInConstantExpressions(ASTContext &C) const {
return Lang.CPlusPlus11 && isConstexpr();
}
-bool VarDecl::isUsableInConstantExpressions(ASTContext &Context) const {
+bool VarDecl::isUsableInConstantExpressions(const ASTContext &Context) const {
// C++2a [expr.const]p3:
// A variable is usable in constant expressions after its initializing
// declaration is encountered...
@@ -2320,7 +2349,16 @@ bool VarDecl::isUsableInConstantExpressions(ASTContext &Context) const {
if (!DefVD->mightBeUsableInConstantExpressions(Context))
return false;
// ... and its initializer is a constant initializer.
- return DefVD->checkInitIsICE();
+ if (Context.getLangOpts().CPlusPlus && !DefVD->hasConstantInitialization())
+ return false;
+ // C++98 [expr.const]p1:
+ // An integral constant-expression can involve only [...] const variables
+ // or static data members of integral or enumeration types initialized with
+ // [integer] constant expressions (dcl.init)
+ if ((Context.getLangOpts().CPlusPlus || Context.getLangOpts().OpenCL) &&
+ !Context.getLangOpts().CPlusPlus11 && !DefVD->hasICEInitializer(Context))
+ return false;
+ return true;
}
/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
@@ -2340,35 +2378,45 @@ EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
return Eval;
}
+EvaluatedStmt *VarDecl::getEvaluatedStmt() const {
+ return Init.dyn_cast<EvaluatedStmt *>();
+}
+
APValue *VarDecl::evaluateValue() const {
SmallVector<PartialDiagnosticAt, 8> Notes;
- return evaluateValue(Notes);
+ return evaluateValueImpl(Notes, hasConstantInitialization());
}
-APValue *VarDecl::evaluateValue(
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+APValue *VarDecl::evaluateValueImpl(SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const {
EvaluatedStmt *Eval = ensureEvaluatedStmt();
+ const auto *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
// We only produce notes indicating why an initializer is non-constant the
// first time it is evaluated. FIXME: The notes won't always be emitted the
// first time we try evaluation, so might not be produced at all.
if (Eval->WasEvaluated)
return Eval->Evaluated.isAbsent() ? nullptr : &Eval->Evaluated;
- const auto *Init = cast<Expr>(Eval->Value);
- assert(!Init->isValueDependent());
-
if (Eval->IsEvaluating) {
// FIXME: Produce a diagnostic for self-initialization.
- Eval->CheckedICE = true;
- Eval->IsICE = false;
return nullptr;
}
Eval->IsEvaluating = true;
- bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
- this, Notes);
+ ASTContext &Ctx = getASTContext();
+ bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, Ctx, this, Notes,
+ IsConstantInitialization);
+
+ // In C++11, this isn't a constant initializer if we produced notes. In that
+  // case, we can't keep the result, because it may only be correct under the
+  // assumption that the initializer was evaluated in a constant context.
+ if (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus11 &&
+ !Notes.empty())
+ Result = false;
// Ensure the computed APValue is cleaned up later if evaluation succeeded,
// or that it's empty (so that there's nothing to clean up) if evaluation
@@ -2376,76 +2424,69 @@ APValue *VarDecl::evaluateValue(
if (!Result)
Eval->Evaluated = APValue();
else if (Eval->Evaluated.needsCleanup())
- getASTContext().addDestruction(&Eval->Evaluated);
+ Ctx.addDestruction(&Eval->Evaluated);
Eval->IsEvaluating = false;
Eval->WasEvaluated = true;
- // In C++11, we have determined whether the initializer was a constant
- // expression as a side-effect.
- if (getASTContext().getLangOpts().CPlusPlus11 && !Eval->CheckedICE) {
- Eval->CheckedICE = true;
- Eval->IsICE = Result && Notes.empty();
- }
-
return Result ? &Eval->Evaluated : nullptr;
}
APValue *VarDecl::getEvaluatedValue() const {
- if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ if (EvaluatedStmt *Eval = getEvaluatedStmt())
if (Eval->WasEvaluated)
return &Eval->Evaluated;
return nullptr;
}
-bool VarDecl::isInitKnownICE() const {
- if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
- return Eval->CheckedICE;
+bool VarDecl::hasICEInitializer(const ASTContext &Context) const {
+ const Expr *Init = getInit();
+ assert(Init && "no initializer");
- return false;
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+ if (!Eval->CheckedForICEInit) {
+ Eval->CheckedForICEInit = true;
+ Eval->HasICEInit = Init->isIntegerConstantExpr(Context);
+ }
+ return Eval->HasICEInit;
}
-bool VarDecl::isInitICE() const {
- assert(isInitKnownICE() &&
- "Check whether we already know that the initializer is an ICE");
- return Init.get<EvaluatedStmt *>()->IsICE;
-}
+bool VarDecl::hasConstantInitialization() const {
+ // In C, all globals (and only globals) have constant initialization.
+ if (hasGlobalStorage() && !getASTContext().getLangOpts().CPlusPlus)
+ return true;
-bool VarDecl::checkInitIsICE() const {
- // Initializers of weak variables are never ICEs.
- if (isWeak())
- return false;
+  // In C++, it depends on whether the initializer at the point of definition
+  // was evaluatable as a constant initializer.
+ if (EvaluatedStmt *Eval = getEvaluatedStmt())
+ return Eval->HasConstantInitialization;
- EvaluatedStmt *Eval = ensureEvaluatedStmt();
- if (Eval->CheckedICE)
- // We have already checked whether this subexpression is an
- // integral constant expression.
- return Eval->IsICE;
+ return false;
+}
- const auto *Init = cast<Expr>(Eval->Value);
- assert(!Init->isValueDependent());
+bool VarDecl::checkForConstantInitialization(
+ SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+ // If we ask for the value before we know whether we have a constant
+ // initializer, we can compute the wrong value (for example, due to
+ // std::is_constant_evaluated()).
+ assert(!Eval->WasEvaluated &&
+ "already evaluated var value before checking for constant init");
+ assert(getASTContext().getLangOpts().CPlusPlus && "only meaningful in C++");
- // In C++11, evaluate the initializer to check whether it's a constant
- // expression.
- if (getASTContext().getLangOpts().CPlusPlus11) {
- SmallVector<PartialDiagnosticAt, 8> Notes;
- evaluateValue(Notes);
- return Eval->IsICE;
- }
+ assert(!cast<Expr>(Eval->Value)->isValueDependent());
- // It's an ICE whether or not the definition we found is
- // out-of-line. See DR 721 and the discussion in Clang PR
- // 6206 for details.
+ // Evaluate the initializer to check whether it's a constant expression.
+ Eval->HasConstantInitialization =
+ evaluateValueImpl(Notes, true) && Notes.empty();
- if (Eval->CheckingICE)
- return false;
- Eval->CheckingICE = true;
+ // If evaluation as a constant initializer failed, allow re-evaluation as a
+ // non-constant initializer if we later find we want the value.
+ if (!Eval->HasConstantInitialization)
+ Eval->WasEvaluated = false;
- Eval->IsICE = Init->isIntegerConstantExpr(getASTContext());
- Eval->CheckingICE = false;
- Eval->CheckedICE = true;
- return Eval->IsICE;
+ return Eval->HasConstantInitialization;
}
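A minimal sketch (assuming C++20, where std::is_constant_evaluated is available) of why the assertion above insists on checking constant initialization before the value is evaluated and cached: the same initializer can produce different values in the two evaluation modes.

    #include <type_traits>

    int n = std::is_constant_evaluated() ? 1 : 2;
    // Checked as a constant initializer first, n becomes 1 and the variable is
    // statically initialized; evaluating the initializer as an ordinary
    // runtime expression instead would have cached the value 2.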
bool VarDecl::isParameterPack() const {
@@ -2598,7 +2639,7 @@ bool VarDecl::isNoDestroy(const ASTContext &Ctx) const {
QualType::DestructionKind
VarDecl::needsDestruction(const ASTContext &Ctx) const {
- if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ if (EvaluatedStmt *Eval = getEvaluatedStmt())
if (Eval->HasConstantDestruction)
return QualType::DK_none;
@@ -2694,6 +2735,17 @@ SourceRange ParmVarDecl::getSourceRange() const {
return DeclaratorDecl::getSourceRange();
}
+bool ParmVarDecl::isDestroyedInCallee() const {
+ if (hasAttr<NSConsumedAttr>())
+ return true;
+
+ auto *RT = getType()->getAs<RecordType>();
+ if (RT && RT->getDecl()->isParamDestroyedInCallee())
+ return true;
+
+ return false;
+}
+
Expr *ParmVarDecl::getDefaultArg() {
assert(!hasUnparsedDefaultArg() && "Default argument is not yet parsed!");
assert(!hasUninstantiatedDefaultArg() &&
@@ -2790,7 +2842,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.HasDefaultedFunctionInfo = false;
FunctionDeclBits.HasImplicitReturnZero = false;
FunctionDeclBits.IsLateTemplateParsed = false;
- FunctionDeclBits.ConstexprKind = ConstexprKind;
+ FunctionDeclBits.ConstexprKind = static_cast<uint64_t>(ConstexprKind);
FunctionDeclBits.InstantiationIsPending = false;
FunctionDeclBits.UsesSEHTry = false;
FunctionDeclBits.UsesFPIntrin = false;
@@ -2867,10 +2919,55 @@ bool FunctionDecl::hasTrivialBody() const {
return false;
}
-bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
- for (auto I : redecls()) {
- if (I->isThisDeclarationADefinition()) {
- Definition = I;
+bool FunctionDecl::isThisDeclarationInstantiatedFromAFriendDefinition() const {
+ if (!getFriendObjectKind())
+ return false;
+
+ // Check for a friend function instantiated from a friend function
+ // definition in a templated class.
+ if (const FunctionDecl *InstantiatedFrom =
+ getInstantiatedFromMemberFunction())
+ return InstantiatedFrom->getFriendObjectKind() &&
+ InstantiatedFrom->isThisDeclarationADefinition();
+
+ // Check for a friend function template instantiated from a friend
+ // function template definition in a templated class.
+ if (const FunctionTemplateDecl *Template = getDescribedFunctionTemplate()) {
+ if (const FunctionTemplateDecl *InstantiatedFrom =
+ Template->getInstantiatedFromMemberTemplate())
+ return InstantiatedFrom->getFriendObjectKind() &&
+ InstantiatedFrom->isThisDeclarationADefinition();
+ }
+
+ return false;
+}
+
+bool FunctionDecl::isDefined(const FunctionDecl *&Definition,
+ bool CheckForPendingFriendDefinition) const {
+ for (const FunctionDecl *FD : redecls()) {
+ if (FD->isThisDeclarationADefinition()) {
+ Definition = FD;
+ return true;
+ }
+
+ // If this is a friend function defined in a class template, it does not
+  // have a body until it is used; nevertheless, it is a definition. See
+ // [temp.inst]p2:
+ //
+ // ... for the purpose of determining whether an instantiated redeclaration
+ // is valid according to [basic.def.odr] and [class.mem], a declaration that
+ // corresponds to a definition in the template is considered to be a
+ // definition.
+ //
+  // The following code must produce a redefinition error:
+ //
+ // template<typename T> struct C20 { friend void func_20() {} };
+ // C20<int> c20i;
+ // void func_20() {}
+ //
+ if (CheckForPendingFriendDefinition &&
+ FD->isThisDeclarationInstantiatedFromAFriendDefinition()) {
+ Definition = FD;
return true;
}
}
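The scenario from the comment above, expanded into a complete translation unit; with CheckForPendingFriendDefinition set, the friend injected by instantiating C20<int> counts as a definition, so the later out-of-class definition is diagnosed (the exact diagnostic wording is an assumption):

    template <typename T> struct C20 { friend void func_20() {} };
    C20<int> c20i;     // instantiation injects func_20 as a pending definition,
                       // even though no body has been instantiated yet
    void func_20() {}  // error: redefinition of 'func_20'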
@@ -3162,44 +3259,24 @@ FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
/// functions as their wrapped builtins. This shouldn't be done in general, but
/// it's useful in Sema to diagnose calls to wrappers based on their semantics.
unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
- unsigned BuiltinID;
+ unsigned BuiltinID = 0;
if (const auto *ABAA = getAttr<ArmBuiltinAliasAttr>()) {
BuiltinID = ABAA->getBuiltinName()->getBuiltinID();
- } else {
- if (!getIdentifier())
- return 0;
-
- BuiltinID = getIdentifier()->getBuiltinID();
+ } else if (const auto *A = getAttr<BuiltinAttr>()) {
+ BuiltinID = A->getID();
}
if (!BuiltinID)
return 0;
- ASTContext &Context = getASTContext();
- if (Context.getLangOpts().CPlusPlus) {
- const auto *LinkageDecl =
- dyn_cast<LinkageSpecDecl>(getFirstDecl()->getDeclContext());
- // In C++, the first declaration of a builtin is always inside an implicit
- // extern "C".
- // FIXME: A recognised library function may not be directly in an extern "C"
- // declaration, for instance "extern "C" { namespace std { decl } }".
- if (!LinkageDecl) {
- if (BuiltinID == Builtin::BI__GetExceptionInfo &&
- Context.getTargetInfo().getCXXABI().isMicrosoft())
- return Builtin::BI__GetExceptionInfo;
- return 0;
- }
- if (LinkageDecl->getLanguage() != LinkageSpecDecl::lang_c)
- return 0;
- }
-
// If the function is marked "overloadable", it has a different mangled name
// and is not the C library function.
if (!ConsiderWrapperFunctions && hasAttr<OverloadableAttr>() &&
!hasAttr<ArmBuiltinAliasAttr>())
return 0;
+ ASTContext &Context = getASTContext();
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return BuiltinID;
@@ -3515,8 +3592,7 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
if (getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
return getDeclName().getCXXOverloadedOperator();
- else
- return OO_None;
+ return OO_None;
}
/// getLiteralIdentifier - The literal suffix identifier this function
@@ -3524,8 +3600,7 @@ OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
if (getDeclName().getNameKind() == DeclarationName::CXXLiteralOperatorName)
return getDeclName().getCXXLiteralIdentifier();
- else
- return nullptr;
+ return nullptr;
}
FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
@@ -3640,7 +3715,13 @@ FunctionDecl::getTemplateInstantiationPattern(bool ForDefinition) const {
return getDefinitionOrSelf(getPrimaryTemplate()->getTemplatedDecl());
}
- if (MemberSpecializationInfo *Info = getMemberSpecializationInfo()) {
+ // Check for a declaration of this function that was instantiated from a
+ // friend definition.
+ const FunctionDecl *FD = nullptr;
+ if (!isDefined(FD, /*CheckForPendingFriendDefinition=*/true))
+ FD = this;
+
+ if (MemberSpecializationInfo *Info = FD->getMemberSpecializationInfo()) {
if (ForDefinition &&
!clang::isTemplateInstantiation(Info->getTemplateSpecializationKind()))
return nullptr;
@@ -3852,8 +3933,8 @@ SourceLocation FunctionDecl::getPointOfInstantiation() const {
= TemplateOrSpecialization.dyn_cast<
FunctionTemplateSpecializationInfo*>())
return FTSInfo->getPointOfInstantiation();
- else if (MemberSpecializationInfo *MSInfo
- = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>())
+ if (MemberSpecializationInfo *MSInfo =
+ TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>())
return MSInfo->getPointOfInstantiation();
return SourceLocation();
@@ -3960,34 +4041,40 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
case Builtin::BIbzero:
return Builtin::BIbzero;
+ case Builtin::BIfree:
+ return Builtin::BIfree;
+
default:
if (isExternC()) {
if (FnInfo->isStr("memset"))
return Builtin::BImemset;
- else if (FnInfo->isStr("memcpy"))
+ if (FnInfo->isStr("memcpy"))
return Builtin::BImemcpy;
- else if (FnInfo->isStr("mempcpy"))
+ if (FnInfo->isStr("mempcpy"))
return Builtin::BImempcpy;
- else if (FnInfo->isStr("memmove"))
+ if (FnInfo->isStr("memmove"))
return Builtin::BImemmove;
- else if (FnInfo->isStr("memcmp"))
+ if (FnInfo->isStr("memcmp"))
return Builtin::BImemcmp;
- else if (FnInfo->isStr("bcmp"))
+ if (FnInfo->isStr("bcmp"))
return Builtin::BIbcmp;
- else if (FnInfo->isStr("strncpy"))
+ if (FnInfo->isStr("strncpy"))
return Builtin::BIstrncpy;
- else if (FnInfo->isStr("strncmp"))
+ if (FnInfo->isStr("strncmp"))
return Builtin::BIstrncmp;
- else if (FnInfo->isStr("strncasecmp"))
+ if (FnInfo->isStr("strncasecmp"))
return Builtin::BIstrncasecmp;
- else if (FnInfo->isStr("strncat"))
+ if (FnInfo->isStr("strncat"))
return Builtin::BIstrncat;
- else if (FnInfo->isStr("strndup"))
+ if (FnInfo->isStr("strndup"))
return Builtin::BIstrndup;
- else if (FnInfo->isStr("strlen"))
+ if (FnInfo->isStr("strlen"))
return Builtin::BIstrlen;
- else if (FnInfo->isStr("bzero"))
+ if (FnInfo->isStr("bzero"))
return Builtin::BIbzero;
+ } else if (isInStdNamespace()) {
+ if (FnInfo->isStr("free"))
+ return Builtin::BIfree;
}
break;
}
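A short sketch of the call pattern the new mapping covers (which diagnostics consume getMemoryFunctionKind is left as an assumption here): a qualified call to std::free is now classified like the C builtin free.

    #include <cstdlib>

    void release(void *p) {
      std::free(p);  // now classified as Builtin::BIfree, same as ::free(p)
    }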
@@ -4705,11 +4792,9 @@ char *Buffer = new (getASTContext(), 1) char[Name.size() + 1];
void ValueDecl::anchor() {}
bool ValueDecl::isWeak() const {
- for (const auto *I : attrs())
- if (isa<WeakAttr>(I) || isa<WeakRefAttr>(I))
- return true;
-
- return isWeakImported();
+ auto *MostRecent = getMostRecentDecl();
+ return MostRecent->hasAttr<WeakAttr>() ||
+ MostRecent->hasAttr<WeakRefAttr>() || isWeakImported();
}
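A minimal example (assumed, not from the patch) of why isWeak() must look at the most recent redeclaration: the attribute may be attached only after the first declaration has already been seen.

    extern int counter;                        // first declaration: no attribute
    extern int counter __attribute__((weak));  // redeclaration adds 'weak', so
                                               // isWeak() must report true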
void ImplicitParamDecl::anchor() {}
@@ -4748,9 +4833,9 @@ FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
}
FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) FunctionDecl(Function, C, nullptr, SourceLocation(),
- DeclarationNameInfo(), QualType(), nullptr,
- SC_None, false, CSK_unspecified, nullptr);
+ return new (C, ID) FunctionDecl(
+ Function, C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(),
+ nullptr, SC_None, false, ConstexprSpecKind::Unspecified, nullptr);
}
BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index da1eadd9d931..c26d6d1a42ea 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -230,11 +230,11 @@ bool Decl::isTemplateDecl() const {
TemplateDecl *Decl::getDescribedTemplate() const {
if (auto *FD = dyn_cast<FunctionDecl>(this))
return FD->getDescribedFunctionTemplate();
- else if (auto *RD = dyn_cast<CXXRecordDecl>(this))
+ if (auto *RD = dyn_cast<CXXRecordDecl>(this))
return RD->getDescribedClassTemplate();
- else if (auto *VD = dyn_cast<VarDecl>(this))
+ if (auto *VD = dyn_cast<VarDecl>(this))
return VD->getDescribedVarTemplate();
- else if (auto *AD = dyn_cast<TypeAliasDecl>(this))
+ if (auto *AD = dyn_cast<TypeAliasDecl>(this))
return AD->getDescribedAliasTemplate();
return nullptr;
@@ -695,24 +695,23 @@ bool Decl::canBeWeakImported(bool &IsDefinition) const {
return false;
}
return true;
-
+ }
// Functions, if they aren't definitions.
- } else if (const auto *FD = dyn_cast<FunctionDecl>(this)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(this)) {
if (FD->hasBody()) {
IsDefinition = true;
return false;
}
return true;
+ }
// Objective-C classes, if this is the non-fragile runtime.
- } else if (isa<ObjCInterfaceDecl>(this) &&
+ if (isa<ObjCInterfaceDecl>(this) &&
getASTContext().getLangOpts().ObjCRuntime.hasWeakClassImport()) {
return true;
-
- // Nothing else.
- } else {
- return false;
}
+ // Nothing else.
+ return false;
}
bool Decl::isWeakImported() const {
@@ -720,7 +719,7 @@ bool Decl::isWeakImported() const {
if (!canBeWeakImported(IsDefinition))
return false;
- for (const auto *A : attrs()) {
+ for (const auto *A : getMostRecentDecl()->attrs()) {
if (isa<WeakImportAttr>(A))
return true;
@@ -835,6 +834,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ExternCContext:
case Decomposition:
case MSGuid:
+ case TemplateParamObject:
case UsingDirective:
case BuiltinTemplate:
@@ -970,21 +970,19 @@ bool Decl::AccessDeclContextSanity() const {
// 5. it's invalid
// 6. it's a C++0x static_assert.
// 7. it's a block literal declaration
- if (isa<TranslationUnitDecl>(this) ||
- isa<TemplateTypeParmDecl>(this) ||
- isa<NonTypeTemplateParmDecl>(this) ||
- !getDeclContext() ||
- !isa<CXXRecordDecl>(getDeclContext()) ||
- isInvalidDecl() ||
- isa<StaticAssertDecl>(this) ||
- isa<BlockDecl>(this) ||
+ // 8. it's a temporary with lifetime extended due to being default value.
+ if (isa<TranslationUnitDecl>(this) || isa<TemplateTypeParmDecl>(this) ||
+ isa<NonTypeTemplateParmDecl>(this) || !getDeclContext() ||
+ !isa<CXXRecordDecl>(getDeclContext()) || isInvalidDecl() ||
+ isa<StaticAssertDecl>(this) || isa<BlockDecl>(this) ||
// FIXME: a ParmVarDecl can have ClassTemplateSpecialization
// as DeclContext (?).
isa<ParmVarDecl>(this) ||
// FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have
// AS_none as access specifier.
isa<CXXRecordDecl>(this) ||
- isa<ClassScopeFunctionSpecializationDecl>(this))
+ isa<ClassScopeFunctionSpecializationDecl>(this) ||
+ isa<LifetimeExtendedTemporaryDecl>(this))
return true;
assert(Access != AS_none &&
@@ -1028,16 +1026,16 @@ template <class T> static Decl *getNonClosureContext(T *D) {
MD->getParent()->isLambda())
return getNonClosureContext(MD->getParent()->getParent());
return MD;
- } else if (auto *FD = dyn_cast<FunctionDecl>(D))
+ }
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
return FD;
- else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
return MD;
- else if (auto *BD = dyn_cast<BlockDecl>(D))
+ if (auto *BD = dyn_cast<BlockDecl>(D))
return getNonClosureContext(BD->getParent());
- else if (auto *CD = dyn_cast<CapturedDecl>(D))
+ if (auto *CD = dyn_cast<CapturedDecl>(D))
return getNonClosureContext(CD->getParent());
- else
- return nullptr;
+ return nullptr;
}
Decl *Decl::getNonClosureContext() {
@@ -1172,10 +1170,8 @@ bool DeclContext::isDependentContext() const {
bool DeclContext::isTransparentContext() const {
if (getDeclKind() == Decl::Enum)
return !cast<EnumDecl>(this)->isScoped();
- else if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export)
- return true;
- return false;
+ return getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export;
}
static bool isLinkageSpecContext(const DeclContext *DC,
@@ -1487,6 +1483,13 @@ static bool shouldBeHidden(NamedDecl *D) {
if (FD->isFunctionTemplateSpecialization())
return true;
+ // Hide destructors that are invalid. There should always be one destructor,
+ // but if it is an invalid decl, another one is created. We need to hide the
+ // invalid one from places that expect exactly one destructor, like the
+ // serialization code.
+ if (isa<CXXDestructorDecl>(D) && D->isInvalidDecl())
+ return true;
+
return false;
}
@@ -2020,9 +2023,9 @@ DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
// Allocate the copy of the PartialDiagnostic via the ASTContext's
// BumpPtrAllocator, rather than the ASTContext itself.
- PartialDiagnostic::Storage *DiagStorage = nullptr;
+ DiagnosticStorage *DiagStorage = nullptr;
if (PDiag.hasStorage())
- DiagStorage = new (C) PartialDiagnostic::Storage;
+ DiagStorage = new (C) DiagnosticStorage;
auto *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
index 6f1fd2f14ede..0368ada0b81c 100644
--- a/clang/lib/AST/DeclCXX.cpp
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -81,7 +81,9 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
HasPublicFields(false), HasMutableFields(false), HasVariantMembers(false),
HasOnlyCMembers(true), HasInClassInitializer(false),
HasUninitializedReferenceMember(false), HasUninitializedFields(false),
- HasInheritedConstructor(false), HasInheritedAssignment(false),
+ HasInheritedConstructor(false),
+ HasInheritedDefaultConstructor(false),
+ HasInheritedAssignment(false),
NeedOverloadResolutionForCopyConstructor(false),
NeedOverloadResolutionForMoveConstructor(false),
NeedOverloadResolutionForCopyAssignment(false),
@@ -100,7 +102,7 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
DefaultedDefaultConstructorIsConstexpr(true),
HasConstexprDefaultConstructor(false),
DefaultedDestructorIsConstexpr(true),
- HasNonLiteralTypeFieldsOrBases(false),
+ HasNonLiteralTypeFieldsOrBases(false), StructuralIfLiteral(true),
UserProvidedDefaultConstructor(false), DeclaredSpecialMembers(0),
ImplicitCopyConstructorCanHaveConstParamForVBase(true),
ImplicitCopyConstructorCanHaveConstParamForNonVBase(true),
@@ -258,9 +260,15 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// C++1z [dcl.init.agg]p1:
// An aggregate is a class with [...] no private or protected base classes
- if (Base->getAccessSpecifier() != AS_public)
+ if (Base->getAccessSpecifier() != AS_public) {
data().Aggregate = false;
+ // C++20 [temp.param]p7:
+ // A structural type is [...] a literal class type with [...] all base
+ // classes [...] public
+ data().StructuralIfLiteral = false;
+ }
+
// C++ [class.virtual]p1:
// A class that declares or inherits a virtual function is called a
// polymorphic class.
@@ -536,6 +544,13 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
// array thereof, that class type shall have a constexpr destructor
if (!Subobj->hasConstexprDestructor())
data().DefaultedDestructorIsConstexpr = false;
+
+ // C++20 [temp.param]p7:
+ // A structural type is [...] a literal class type [for which] the types
+ // of all base classes and non-static data members are structural types or
+ // (possibly multi-dimensional) array thereof
+ if (!Subobj->data().StructuralIfLiteral)
+ data().StructuralIfLiteral = false;
}
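A sketch of what the new StructuralIfLiteral bit gates, per C++20 [temp.param]p7: a literal class whose bases and non-static data members are all public and of structural type can be used as a non-type template parameter.

    struct Pt { int x; int y; };        // structural: public members of structural type

    template <Pt P> int getX() { return P.x; }

    int v = getX<Pt{1, 2}>();           // OK in C++20

    struct Hidden { private: int h; };  // not structural: private data member
    // template <Hidden H> void f();    // would be ill-formed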
bool CXXRecordDecl::hasConstexprDestructor() const {
@@ -801,6 +816,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
// constructor [...]
if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor())
data().HasConstexprNonCopyMoveConstructor = true;
+ if (!isa<CXXConstructorDecl>(D) && Constructor->isDefaultConstructor())
+ data().HasInheritedDefaultConstructor = true;
}
// Handle destructors.
@@ -956,6 +973,11 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (D->getAccess() == AS_private || D->getAccess() == AS_protected) {
data().Aggregate = false;
data().PlainOldData = false;
+
+ // C++20 [temp.param]p7:
+ // A structural type is [...] a literal class type [for which] all
+ // non-static data members are public
+ data().StructuralIfLiteral = false;
}
// Track whether this is the first field. We use this when checking
@@ -980,9 +1002,15 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Keep track of the presence of mutable fields.
- if (Field->isMutable())
+ if (Field->isMutable()) {
data().HasMutableFields = true;
+ // C++20 [temp.param]p7:
+ // A structural type is [...] a literal class type [for which] all
+ // non-static data members are public
+ data().StructuralIfLiteral = false;
+ }
+
// C++11 [class.union]p8, DR1460:
// If X is a union, a non-static data member of X that is not an anonymous
// union is a variant member of X.
@@ -1315,6 +1343,14 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().DefaultedCopyAssignmentIsDeleted = true;
data().DefaultedMoveAssignmentIsDeleted = true;
}
+
+ // C++20 [temp.param]p7:
+ // A structural type is [...] a literal class type [for which] the
+ // types of all non-static data members are structural types or
+ // (possibly multidimensional) array thereof
+ // We deal with class types elsewhere.
+ if (!T->isStructuralType())
+ data().StructuralIfLiteral = false;
}
// C++14 [meta.unary.prop]p4:
@@ -1383,8 +1419,8 @@ void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
data().DeclaredNonTrivialSpecialMembers |= SMKind;
}
-void CXXRecordDecl::setCaptures(ArrayRef<LambdaCapture> Captures) {
- ASTContext &Context = getASTContext();
+void CXXRecordDecl::setCaptures(ASTContext &Context,
+ ArrayRef<LambdaCapture> Captures) {
CXXRecordDecl::LambdaDefinitionData &Data = getLambdaData();
// Copy captures.
@@ -1475,18 +1511,38 @@ CXXMethodDecl *CXXRecordDecl::getLambdaCallOperator() const {
}
CXXMethodDecl* CXXRecordDecl::getLambdaStaticInvoker() const {
- if (!isLambda()) return nullptr;
+ CXXMethodDecl *CallOp = getLambdaCallOperator();
+ CallingConv CC = CallOp->getType()->castAs<FunctionType>()->getCallConv();
+ return getLambdaStaticInvoker(CC);
+}
+
+static DeclContext::lookup_result
+getLambdaStaticInvokers(const CXXRecordDecl &RD) {
+ assert(RD.isLambda() && "Must be a lambda");
DeclarationName Name =
- &getASTContext().Idents.get(getLambdaStaticInvokerName());
- DeclContext::lookup_result Invoker = lookup(Name);
- if (Invoker.empty()) return nullptr;
- assert(allLookupResultsAreTheSame(Invoker) &&
- "More than one static invoker operator!");
- NamedDecl *InvokerFun = Invoker.front();
- if (const auto *InvokerTemplate = dyn_cast<FunctionTemplateDecl>(InvokerFun))
+ &RD.getASTContext().Idents.get(getLambdaStaticInvokerName());
+ return RD.lookup(Name);
+}
+
+static CXXMethodDecl *getInvokerAsMethod(NamedDecl *ND) {
+ if (const auto *InvokerTemplate = dyn_cast<FunctionTemplateDecl>(ND))
return cast<CXXMethodDecl>(InvokerTemplate->getTemplatedDecl());
+ return cast<CXXMethodDecl>(ND);
+}
+
+CXXMethodDecl *CXXRecordDecl::getLambdaStaticInvoker(CallingConv CC) const {
+ if (!isLambda())
+ return nullptr;
+ DeclContext::lookup_result Invoker = getLambdaStaticInvokers(*this);
+
+ for (NamedDecl *ND : Invoker) {
+ const auto *FTy =
+ cast<ValueDecl>(ND->getAsFunction())->getType()->castAs<FunctionType>();
+ if (FTy->getCallConv() == CC)
+ return getInvokerAsMethod(ND);
+ }
- return cast<CXXMethodDecl>(InvokerFun);
+ return nullptr;
}
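An illustration (assuming a Windows target, where clang allows a lambda to convert to function pointers of several calling conventions) of why there can be more than one static invoker, and why a calling convention is needed to select the right one:

    auto square = [](int x) { return x * x; };

    int (*viaDefault)(int) = square;            // uses the default-CC invoker
    #ifdef _MSC_VER
    int (__stdcall *viaStdcall)(int) = square;  // uses the __stdcall invoker
    #endif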
void CXXRecordDecl::getCaptureFields(
@@ -2135,10 +2191,10 @@ CXXMethodDecl *CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
}
CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) CXXMethodDecl(
- CXXMethod, C, nullptr, SourceLocation(), DeclarationNameInfo(),
- QualType(), nullptr, SC_None, false, CSK_unspecified, SourceLocation(),
- nullptr);
+ return new (C, ID)
+ CXXMethodDecl(CXXMethod, C, nullptr, SourceLocation(),
+ DeclarationNameInfo(), QualType(), nullptr, SC_None, false,
+ ConstexprSpecKind::Unspecified, SourceLocation(), nullptr);
}
CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
@@ -2427,14 +2483,8 @@ bool CXXMethodDecl::hasInlineBody() const {
bool CXXMethodDecl::isLambdaStaticInvoker() const {
const CXXRecordDecl *P = getParent();
- if (P->isLambda()) {
- if (const CXXMethodDecl *StaticInvoker = P->getLambdaStaticInvoker()) {
- if (StaticInvoker == this) return true;
- if (P->isGenericLambda() && this->isFunctionTemplateSpecialization())
- return StaticInvoker == this->getPrimaryTemplate()->getTemplatedDecl();
- }
- }
- return false;
+ return P->isLambda() && getDeclName().isIdentifier() &&
+ getName() == getLambdaStaticInvokerName();
}
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
@@ -2543,10 +2593,10 @@ CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
unsigned Extra =
additionalSizeToAlloc<InheritedConstructor, ExplicitSpecifier>(
isInheritingConstructor, hasTraillingExplicit);
- auto *Result = new (C, ID, Extra)
- CXXConstructorDecl(C, nullptr, SourceLocation(), DeclarationNameInfo(),
- QualType(), nullptr, ExplicitSpecifier(), false, false,
- CSK_unspecified, InheritedConstructor(), nullptr);
+ auto *Result = new (C, ID, Extra) CXXConstructorDecl(
+ C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
+ ExplicitSpecifier(), false, false, ConstexprSpecKind::Unspecified,
+ InheritedConstructor(), nullptr);
Result->setInheritingConstructor(isInheritingConstructor);
Result->CXXConstructorDeclBits.HasTrailingExplicitSpecifier =
hasTraillingExplicit;
@@ -2684,10 +2734,9 @@ void CXXDestructorDecl::anchor() {}
CXXDestructorDecl *
CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID)
- CXXDestructorDecl(C, nullptr, SourceLocation(), DeclarationNameInfo(),
- QualType(), nullptr, false, false, CSK_unspecified,
- nullptr);
+ return new (C, ID) CXXDestructorDecl(
+ C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
+ false, false, ConstexprSpecKind::Unspecified, nullptr);
}
CXXDestructorDecl *CXXDestructorDecl::Create(
@@ -2720,7 +2769,8 @@ CXXConversionDecl *
CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) CXXConversionDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
- false, ExplicitSpecifier(), CSK_unspecified, SourceLocation(), nullptr);
+ false, ExplicitSpecifier(), ConstexprSpecKind::Unspecified,
+ SourceLocation(), nullptr);
}
CXXConversionDecl *CXXConversionDecl::Create(
@@ -3301,12 +3351,7 @@ static const char *getAccessName(AccessSpecifier AS) {
llvm_unreachable("Invalid access specifier!");
}
-const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
- AccessSpecifier AS) {
- return DB << getAccessName(AS);
-}
-
-const PartialDiagnostic &clang::operator<<(const PartialDiagnostic &DB,
- AccessSpecifier AS) {
+const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
+ AccessSpecifier AS) {
return DB << getAccessName(AS);
}
diff --git a/clang/lib/AST/DeclObjC.cpp b/clang/lib/AST/DeclObjC.cpp
index 5c8b34731f36..5f82fcec90e3 100644
--- a/clang/lib/AST/DeclObjC.cpp
+++ b/clang/lib/AST/DeclObjC.cpp
@@ -33,6 +33,7 @@
#include <cassert>
#include <cstdint>
#include <cstring>
+#include <queue>
#include <utility>
using namespace clang;
@@ -949,7 +950,8 @@ ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
if (!Redecl && isRedeclaration()) {
// This is the last redeclaration, go back to the first method.
return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
- isInstanceMethod());
+ isInstanceMethod(),
+ /*AllowHidden=*/true);
}
return Redecl ? Redecl : this;
@@ -982,7 +984,8 @@ ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
if (isRedeclaration()) {
// It is possible that we have not done deserializing the ObjCMethod yet.
ObjCMethodDecl *MD =
- cast<ObjCContainerDecl>(CtxD)->getMethod(Sel, isInstanceMethod());
+ cast<ObjCContainerDecl>(CtxD)->getMethod(Sel, isInstanceMethod(),
+ /*AllowHidden=*/true);
return MD ? MD : this;
}
@@ -1165,6 +1168,14 @@ ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
llvm_unreachable("unknown method context");
}
+ObjCCategoryDecl *ObjCMethodDecl::getCategory() {
+ if (auto *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
+ return CD;
+ if (auto *IMD = dyn_cast<ObjCCategoryImplDecl>(getDeclContext()))
+ return IMD->getCategoryDecl();
+ return nullptr;
+}
+
SourceRange ObjCMethodDecl::getReturnTypeSourceRange() const {
const auto *TSI = getReturnTypeSourceInfo();
if (TSI)
@@ -1299,8 +1310,9 @@ void ObjCMethodDecl::getOverriddenMethods(
const ObjCMethodDecl *Method = this;
if (Method->isRedeclaration()) {
- Method = cast<ObjCContainerDecl>(Method->getDeclContext())->
- getMethod(Method->getSelector(), Method->isInstanceMethod());
+ Method = cast<ObjCContainerDecl>(Method->getDeclContext())
+ ->getMethod(Method->getSelector(), Method->isInstanceMethod(),
+ /*AllowHidden=*/true);
}
if (Method->isOverriding()) {
@@ -1449,9 +1461,7 @@ SourceRange ObjCTypeParamDecl::getSourceRange() const {
ObjCTypeParamList::ObjCTypeParamList(SourceLocation lAngleLoc,
ArrayRef<ObjCTypeParamDecl *> typeParams,
SourceLocation rAngleLoc)
- : NumParams(typeParams.size()) {
- Brackets.Begin = lAngleLoc.getRawEncoding();
- Brackets.End = rAngleLoc.getRawEncoding();
+ : Brackets(lAngleLoc, rAngleLoc), NumParams(typeParams.size()) {
std::copy(typeParams.begin(), typeParams.end(), begin());
}
@@ -1897,6 +1907,27 @@ ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
return Result;
}
+bool ObjCProtocolDecl::isNonRuntimeProtocol() const {
+ return hasAttr<ObjCNonRuntimeProtocolAttr>();
+}
+
+void ObjCProtocolDecl::getImpliedProtocols(
+ llvm::DenseSet<const ObjCProtocolDecl *> &IPs) const {
+ std::queue<const ObjCProtocolDecl *> WorkQueue;
+ WorkQueue.push(this);
+
+ while (!WorkQueue.empty()) {
+ const auto *PD = WorkQueue.front();
+ WorkQueue.pop();
+ for (const auto *Parent : PD->protocols()) {
+ const auto *Can = Parent->getCanonicalDecl();
+ auto Result = IPs.insert(Can);
+ if (Result.second)
+ WorkQueue.push(Parent);
+ }
+ }
+}
+
ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) {
ObjCProtocolDecl *PDecl = this;
diff --git a/clang/lib/AST/DeclOpenMP.cpp b/clang/lib/AST/DeclOpenMP.cpp
index af321280d417..867ef31656f7 100644
--- a/clang/lib/AST/DeclOpenMP.cpp
+++ b/clang/lib/AST/DeclOpenMP.cpp
@@ -23,16 +23,14 @@ using namespace clang;
// OMPThreadPrivateDecl Implementation.
//===----------------------------------------------------------------------===//
-void OMPThreadPrivateDecl::anchor() { }
+void OMPThreadPrivateDecl::anchor() {}
OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
ArrayRef<Expr *> VL) {
- OMPThreadPrivateDecl *D =
- new (C, DC, additionalSizeToAlloc<Expr *>(VL.size()))
- OMPThreadPrivateDecl(OMPThreadPrivate, DC, L);
- D->NumVars = VL.size();
+ auto *D = OMPDeclarativeDirective::createDirective<OMPThreadPrivateDecl>(
+ C, DC, llvm::None, VL.size(), L);
D->setVars(VL);
return D;
}
@@ -40,16 +38,14 @@ OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
unsigned ID,
unsigned N) {
- OMPThreadPrivateDecl *D = new (C, ID, additionalSizeToAlloc<Expr *>(N))
- OMPThreadPrivateDecl(OMPThreadPrivate, nullptr, SourceLocation());
- D->NumVars = N;
- return D;
+ return OMPDeclarativeDirective::createEmptyDirective<OMPThreadPrivateDecl>(
+ C, ID, 0, N);
}
void OMPThreadPrivateDecl::setVars(ArrayRef<Expr *> VL) {
- assert(VL.size() == NumVars &&
+ assert(VL.size() == Data->getNumChildren() &&
"Number of variables is not the same as the preallocated buffer");
- std::uninitialized_copy(VL.begin(), VL.end(), getTrailingObjects<Expr *>());
+ llvm::copy(VL, getVars().begin());
}
//===----------------------------------------------------------------------===//
@@ -61,38 +57,23 @@ void OMPAllocateDecl::anchor() { }
OMPAllocateDecl *OMPAllocateDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, ArrayRef<Expr *> VL,
ArrayRef<OMPClause *> CL) {
- OMPAllocateDecl *D = new (
- C, DC, additionalSizeToAlloc<Expr *, OMPClause *>(VL.size(), CL.size()))
- OMPAllocateDecl(OMPAllocate, DC, L);
- D->NumVars = VL.size();
+ auto *D = OMPDeclarativeDirective::createDirective<OMPAllocateDecl>(
+ C, DC, CL, VL.size(), L);
D->setVars(VL);
- D->NumClauses = CL.size();
- D->setClauses(CL);
return D;
}
OMPAllocateDecl *OMPAllocateDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NVars,
unsigned NClauses) {
- OMPAllocateDecl *D =
- new (C, ID, additionalSizeToAlloc<Expr *, OMPClause *>(NVars, NClauses))
- OMPAllocateDecl(OMPAllocate, nullptr, SourceLocation());
- D->NumVars = NVars;
- D->NumClauses = NClauses;
- return D;
+ return OMPDeclarativeDirective::createEmptyDirective<OMPAllocateDecl>(
+ C, ID, NClauses, NVars, SourceLocation());
}
void OMPAllocateDecl::setVars(ArrayRef<Expr *> VL) {
- assert(VL.size() == NumVars &&
+ assert(VL.size() == Data->getNumChildren() &&
"Number of variables is not the same as the preallocated buffer");
- std::uninitialized_copy(VL.begin(), VL.end(), getTrailingObjects<Expr *>());
-}
-
-void OMPAllocateDecl::setClauses(ArrayRef<OMPClause *> CL) {
- assert(CL.size() == NumClauses &&
- "Number of variables is not the same as the preallocated buffer");
- std::uninitialized_copy(CL.begin(), CL.end(),
- getTrailingObjects<OMPClause *>());
+ llvm::copy(VL, getVars().begin());
}
//===----------------------------------------------------------------------===//
@@ -104,27 +85,14 @@ void OMPRequiresDecl::anchor() {}
OMPRequiresDecl *OMPRequiresDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
ArrayRef<OMPClause *> CL) {
- OMPRequiresDecl *D =
- new (C, DC, additionalSizeToAlloc<OMPClause *>(CL.size()))
- OMPRequiresDecl(OMPRequires, DC, L);
- D->NumClauses = CL.size();
- D->setClauses(CL);
- return D;
+ return OMPDeclarativeDirective::createDirective<OMPRequiresDecl>(C, DC, CL, 0,
+ L);
}
OMPRequiresDecl *OMPRequiresDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N) {
- OMPRequiresDecl *D = new (C, ID, additionalSizeToAlloc<OMPClause *>(N))
- OMPRequiresDecl(OMPRequires, nullptr, SourceLocation());
- D->NumClauses = N;
- return D;
-}
-
-void OMPRequiresDecl::setClauses(ArrayRef<OMPClause *> CL) {
- assert(CL.size() == NumClauses &&
- "Number of clauses is not the same as the preallocated buffer");
- std::uninitialized_copy(CL.begin(), CL.end(),
- getTrailingObjects<OMPClause *>());
+ return OMPDeclarativeDirective::createEmptyDirective<OMPRequiresDecl>(
+ C, ID, N, 0, SourceLocation());
}
//===----------------------------------------------------------------------===//
@@ -171,48 +139,20 @@ OMPDeclareReductionDecl::getPrevDeclInScope() const {
void OMPDeclareMapperDecl::anchor() {}
-OMPDeclareMapperDecl *
-OMPDeclareMapperDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
- DeclarationName Name, QualType T,
- DeclarationName VarName,
- OMPDeclareMapperDecl *PrevDeclInScope) {
- return new (C, DC) OMPDeclareMapperDecl(OMPDeclareMapper, DC, L, Name, T,
- VarName, PrevDeclInScope);
+OMPDeclareMapperDecl *OMPDeclareMapperDecl::Create(
+ ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
+ QualType T, DeclarationName VarName, ArrayRef<OMPClause *> Clauses,
+ OMPDeclareMapperDecl *PrevDeclInScope) {
+ return OMPDeclarativeDirective::createDirective<OMPDeclareMapperDecl>(
+ C, DC, Clauses, 1, L, Name, T, VarName, PrevDeclInScope);
}
OMPDeclareMapperDecl *OMPDeclareMapperDecl::CreateDeserialized(ASTContext &C,
unsigned ID,
unsigned N) {
- auto *D = new (C, ID)
- OMPDeclareMapperDecl(OMPDeclareMapper, /*DC=*/nullptr, SourceLocation(),
- DeclarationName(), QualType(), DeclarationName(),
- /*PrevDeclInScope=*/nullptr);
- if (N) {
- auto **ClauseStorage = C.Allocate<OMPClause *>(N);
- D->Clauses = llvm::makeMutableArrayRef<OMPClause *>(ClauseStorage, N);
- }
- return D;
-}
-
-/// Creates an array of clauses to this mapper declaration and intializes
-/// them. The space used to store clause pointers is dynamically allocated,
-/// because we do not know the number of clauses when creating
-/// OMPDeclareMapperDecl
-void OMPDeclareMapperDecl::CreateClauses(ASTContext &C,
- ArrayRef<OMPClause *> CL) {
- assert(Clauses.empty() && "Number of clauses should be 0 on initialization");
- size_t NumClauses = CL.size();
- if (NumClauses) {
- auto **ClauseStorage = C.Allocate<OMPClause *>(NumClauses);
- Clauses = llvm::makeMutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
- setClauses(CL);
- }
-}
-
-void OMPDeclareMapperDecl::setClauses(ArrayRef<OMPClause *> CL) {
- assert(CL.size() == Clauses.size() &&
- "Number of clauses is not the same as the preallocated buffer");
- std::uninitialized_copy(CL.begin(), CL.end(), Clauses.data());
+ return OMPDeclarativeDirective::createEmptyDirective<OMPDeclareMapperDecl>(
+ C, ID, N, 1, SourceLocation(), DeclarationName(), QualType(),
+ DeclarationName(), /*PrevDeclInScope=*/nullptr);
}
OMPDeclareMapperDecl *OMPDeclareMapperDecl::getPrevDeclInScope() {
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index 2e48b2b46c4d..ca64f8f6cfbe 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -528,7 +528,8 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
prettyPrintAttributes(D);
- Out << ' ' << *D;
+ if (D->getDeclName())
+ Out << ' ' << D->getDeclName();
if (D->isFixed())
Out << " : " << D->getIntegerType().stream(Policy);
@@ -933,7 +934,12 @@ void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
if (D->isInline())
Out << "inline ";
- Out << "namespace " << *D << " {\n";
+
+ Out << "namespace ";
+ if (D->getDeclName())
+ Out << D->getDeclName() << ' ';
+ Out << "{\n";
+
VisitDeclContext(D);
Indent() << "}";
}
@@ -1091,10 +1097,15 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
if (const TemplateTemplateParmDecl *TTP =
dyn_cast<TemplateTemplateParmDecl>(D)) {
- Out << "class ";
+ Out << "class";
+
if (TTP->isParameterPack())
- Out << "...";
- Out << D->getName();
+ Out << " ...";
+ else if (TTP->getDeclName())
+ Out << ' ';
+
+ if (TTP->getDeclName())
+ Out << TTP->getDeclName();
} else if (auto *TD = D->getTemplatedDecl())
Visit(TD);
else if (const auto *Concept = dyn_cast<ConceptDecl>(D)) {
@@ -1216,7 +1227,7 @@ void DeclPrinter::PrintObjCTypeParams(ObjCTypeParamList *Params) {
break;
}
- Out << Param->getDeclName().getAsString();
+ Out << Param->getDeclName();
if (Param->hasExplicitBound()) {
Out << " : " << Param->getUnderlyingType().getAsString(Policy);
@@ -1695,10 +1706,11 @@ void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
if (TTP->isParameterPack())
Out << " ...";
- else if (!TTP->getName().empty())
+ else if (TTP->getDeclName())
Out << ' ';
- Out << *TTP;
+ if (TTP->getDeclName())
+ Out << TTP->getDeclName();
if (TTP->hasDefaultArgument()) {
Out << " = ";
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index 7857e75f57a1..25235c56ec46 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -102,24 +102,10 @@ unsigned TemplateParameterList::getMinRequiredArguments() const {
unsigned NumRequiredArgs = 0;
for (const NamedDecl *P : asArray()) {
if (P->isTemplateParameterPack()) {
- if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
- if (NTTP->isExpandedParameterPack()) {
- NumRequiredArgs += NTTP->getNumExpansionTypes();
- continue;
- }
- } else if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(P)) {
- if (TTP->isExpandedParameterPack()) {
- NumRequiredArgs += TTP->getNumExpansionParameters();
- continue;
- }
- } else {
- const auto *TP = cast<TemplateTemplateParmDecl>(P);
- if (TP->isExpandedParameterPack()) {
- NumRequiredArgs += TP->getNumExpansionTemplateParameters();
- continue;
- }
+ if (Optional<unsigned> Expansions = getExpandedPackSize(P)) {
+ NumRequiredArgs += *Expansions;
+ continue;
}
-
break;
}
@@ -440,7 +426,7 @@ ClassTemplateDecl::getSpecializations() const {
}
llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &
-ClassTemplateDecl::getPartialSpecializations() {
+ClassTemplateDecl::getPartialSpecializations() const {
LoadLazySpecializations();
return getCommonPtr()->PartialSpecializations;
}
@@ -528,7 +514,7 @@ void ClassTemplateDecl::AddPartialSpecialization(
}
void ClassTemplateDecl::getPartialSpecializations(
- SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
+ SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) const {
llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &PartialSpecs
= getPartialSpecializations();
PS.clear();
@@ -914,10 +900,14 @@ void ClassTemplateSpecializationDecl::getNameForDiagnostic(
const auto *PS = dyn_cast<ClassTemplatePartialSpecializationDecl>(this);
if (const ASTTemplateArgumentListInfo *ArgsAsWritten =
PS ? PS->getTemplateArgsAsWritten() : nullptr) {
- printTemplateArgumentList(OS, ArgsAsWritten->arguments(), Policy);
+ printTemplateArgumentList(
+ OS, ArgsAsWritten->arguments(), Policy,
+ getSpecializedTemplate()->getTemplateParameters());
} else {
const TemplateArgumentList &TemplateArgs = getTemplateArgs();
- printTemplateArgumentList(OS, TemplateArgs.asArray(), Policy);
+ printTemplateArgumentList(
+ OS, TemplateArgs.asArray(), Policy,
+ getSpecializedTemplate()->getTemplateParameters());
}
}
@@ -1142,7 +1132,7 @@ VarTemplateDecl::getSpecializations() const {
}
llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> &
-VarTemplateDecl::getPartialSpecializations() {
+VarTemplateDecl::getPartialSpecializations() const {
LoadLazySpecializations();
return getCommonPtr()->PartialSpecializations;
}
@@ -1198,7 +1188,7 @@ void VarTemplateDecl::AddPartialSpecialization(
}
void VarTemplateDecl::getPartialSpecializations(
- SmallVectorImpl<VarTemplatePartialSpecializationDecl *> &PS) {
+ SmallVectorImpl<VarTemplatePartialSpecializationDecl *> &PS) const {
llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> &PartialSpecs =
getPartialSpecializations();
PS.clear();
@@ -1261,10 +1251,14 @@ void VarTemplateSpecializationDecl::getNameForDiagnostic(
const auto *PS = dyn_cast<VarTemplatePartialSpecializationDecl>(this);
if (const ASTTemplateArgumentListInfo *ArgsAsWritten =
PS ? PS->getTemplateArgsAsWritten() : nullptr) {
- printTemplateArgumentList(OS, ArgsAsWritten->arguments(), Policy);
+ printTemplateArgumentList(
+ OS, ArgsAsWritten->arguments(), Policy,
+ getSpecializedTemplate()->getTemplateParameters());
} else {
const TemplateArgumentList &TemplateArgs = getTemplateArgs();
- printTemplateArgumentList(OS, TemplateArgs.asArray(), Policy);
+ printTemplateArgumentList(
+ OS, TemplateArgs.asArray(), Policy,
+ getSpecializedTemplate()->getTemplateParameters());
}
}
@@ -1431,3 +1425,36 @@ void TypeConstraint::print(llvm::raw_ostream &OS, PrintingPolicy Policy) const {
OS << ">";
}
}
+
+TemplateParamObjectDecl *TemplateParamObjectDecl::Create(const ASTContext &C,
+ QualType T,
+ const APValue &V) {
+ DeclContext *DC = C.getTranslationUnitDecl();
+ auto *TPOD = new (C, DC) TemplateParamObjectDecl(DC, T, V);
+ C.addDestruction(&TPOD->Value);
+ return TPOD;
+}
+
+TemplateParamObjectDecl *
+TemplateParamObjectDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ auto *TPOD = new (C, ID) TemplateParamObjectDecl(nullptr, QualType(), APValue());
+ C.addDestruction(&TPOD->Value);
+ return TPOD;
+}
+
+void TemplateParamObjectDecl::printName(llvm::raw_ostream &OS) const {
+ OS << "<template param ";
+ printAsExpr(OS);
+ OS << ">";
+}
+
+void TemplateParamObjectDecl::printAsExpr(llvm::raw_ostream &OS) const {
+ const ASTContext &Ctx = getASTContext();
+ getType().getUnqualifiedType().print(OS, Ctx.getPrintingPolicy());
+ printAsInit(OS);
+}
+
+void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS) const {
+ const ASTContext &Ctx = getASTContext();
+ getValue().printPretty(OS, Ctx, getType());
+}
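A sketch of the entity a TemplateParamObjectDecl models: in C++20, a class-type non-type template argument denotes a unique template parameter object. The rendering noted in the comment below is inferred from printName above and is an assumption.

    struct Wrap { int v; };

    template <Wrap W> const int *addr() { return &W.v; }  // &W.v points into
                                                          // the param object

    const int *p = addr<Wrap{42}>();  // in diagnostics, W would print roughly
                                      // as "<template param Wrap{42}>"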
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index 399e7e13c445..adb33036a168 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/IgnoreExpr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
@@ -39,7 +40,7 @@ using namespace clang;
const Expr *Expr::getBestDynamicClassTypeExpr() const {
const Expr *E = this;
while (true) {
- E = E->ignoreParenBaseCasts();
+ E = E->IgnoreParenBaseCasts();
// Follow the RHS of a comma operator.
if (auto *BO = dyn_cast<BinaryOperator>(E)) {
@@ -116,7 +117,8 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
BO->getRHS()->getType()->getAs<MemberPointerType>();
Adjustments.push_back(SubobjectAdjustment(MPT, BO->getRHS()));
continue;
- } else if (BO->getOpcode() == BO_Comma) {
+ }
+ if (BO->getOpcode() == BO_Comma) {
CommaLHSs.push_back(BO->getLHS());
E = BO->getRHS();
continue;
@@ -359,7 +361,6 @@ llvm::APSInt ConstantExpr::getResultAsAPSInt() const {
}
APValue ConstantExpr::getAPValueResult() const {
- assert(hasAPValueResult());
switch (ConstantExprBits.ResultKind) {
case ConstantExpr::RSK_APValue:
@@ -369,6 +370,8 @@ APValue ConstantExpr::getAPValueResult() const {
llvm::APSInt(llvm::APInt(ConstantExprBits.BitWidth, Int64Result()),
ConstantExprBits.IsUnsigned));
case ConstantExpr::RSK_None:
+ if (ConstantExprBits.APValueKind == APValue::Indeterminate)
+ return APValue::IndeterminateValue();
return APValue();
}
llvm_unreachable("invalid ResultKind");
@@ -484,6 +487,11 @@ DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
return new (Mem) DeclRefExpr(EmptyShell());
}
+void DeclRefExpr::setDecl(ValueDecl *NewD) {
+ D = NewD;
+ setDependence(computeDependence(this, NewD->getASTContext()));
+}
+
SourceLocation DeclRefExpr::getBeginLoc() const {
if (hasQualifier())
return getQualifierLoc().getBeginLoc();
@@ -509,34 +517,6 @@ PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
setDependence(computeDependence(this));
}
-PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FnTy, IdentKind IK,
- TypeSourceInfo *Info)
- : Expr(PredefinedExprClass, FnTy, VK_LValue, OK_Ordinary) {
- PredefinedExprBits.Kind = IK;
- assert((getIdentKind() == IK) &&
- "IdentKind do not fit in PredefinedExprBitFields!");
- assert(IK == UniqueStableNameType &&
- "Constructor only valid with UniqueStableNameType");
- PredefinedExprBits.HasFunctionName = false;
- PredefinedExprBits.Loc = L;
- setTypeSourceInfo(Info);
- setDependence(computeDependence(this));
-}
-
-PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FnTy, IdentKind IK,
- Expr *E)
- : Expr(PredefinedExprClass, FnTy, VK_LValue, OK_Ordinary) {
- PredefinedExprBits.Kind = IK;
- assert((getIdentKind() == IK) &&
- "IdentKind do not fit in PredefinedExprBitFields!");
- assert(IK == UniqueStableNameExpr &&
- "Constructor only valid with UniqueStableNameExpr");
- PredefinedExprBits.HasFunctionName = false;
- PredefinedExprBits.Loc = L;
- setExpr(E);
- setDependence(computeDependence(this));
-}
-
PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName)
: Expr(PredefinedExprClass, Empty) {
PredefinedExprBits.HasFunctionName = HasFunctionName;
@@ -546,44 +526,15 @@ PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
QualType FNTy, IdentKind IK,
StringLiteral *SL) {
bool HasFunctionName = SL != nullptr;
- void *Mem = Ctx.Allocate(
- totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(HasFunctionName, 0, 0),
- alignof(PredefinedExpr));
- return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
-}
-
-PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
- QualType FNTy, IdentKind IK,
- StringLiteral *SL,
- TypeSourceInfo *Info) {
- assert(IK == UniqueStableNameType && "Only valid with UniqueStableNameType");
- bool HasFunctionName = SL != nullptr;
- void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(
- HasFunctionName, 0, !HasFunctionName),
- alignof(PredefinedExpr));
- if (HasFunctionName)
- return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
- return new (Mem) PredefinedExpr(L, FNTy, IK, Info);
-}
-
-PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
- QualType FNTy, IdentKind IK,
- StringLiteral *SL, Expr *E) {
- assert(IK == UniqueStableNameExpr && "Only valid with UniqueStableNameExpr");
- bool HasFunctionName = SL != nullptr;
- void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(
- HasFunctionName, !HasFunctionName, 0),
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
alignof(PredefinedExpr));
- if (HasFunctionName)
- return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
- return new (Mem) PredefinedExpr(L, FNTy, IK, E);
+ return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
}
PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
bool HasFunctionName) {
- void *Mem = Ctx.Allocate(
- totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(HasFunctionName, 0, 0),
- alignof(PredefinedExpr));
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
+ alignof(PredefinedExpr));
return new (Mem) PredefinedExpr(EmptyShell(), HasFunctionName);
}
@@ -603,28 +554,12 @@ StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) {
return "__FUNCSIG__";
case LFuncSig:
return "L__FUNCSIG__";
- case UniqueStableNameType:
- case UniqueStableNameExpr:
- return "__builtin_unique_stable_name";
case PrettyFunctionNoVirtual:
break;
}
llvm_unreachable("Unknown ident kind for PredefinedExpr");
}
-std::string PredefinedExpr::ComputeName(ASTContext &Context, IdentKind IK,
- QualType Ty) {
- std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create(
- Context, Context.getDiagnostics(), /*IsUniqueNameMangler*/ true)};
-
- Ty = Ty.getCanonicalType();
-
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- Ctx->mangleTypeName(Ty, Out);
- return std::string(Buffer.str());
-}
-
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
@@ -652,8 +587,8 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
if (!Buffer.empty() && Buffer.front() == '\01')
return std::string(Buffer.substr(1));
return std::string(Buffer.str());
- } else
- return std::string(ND->getIdentifier()->getName());
+ }
+ return std::string(ND->getIdentifier()->getName());
}
return "";
}
@@ -1303,8 +1238,8 @@ OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
- SourceLocation RParenLoc, unsigned MinNumArgs,
- ADLCallKind UsesADL)
+ SourceLocation RParenLoc, FPOptionsOverride FPFeatures,
+ unsigned MinNumArgs, ADLCallKind UsesADL)
: Expr(SC, Ty, VK, OK_Ordinary), RParenLoc(RParenLoc) {
NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
unsigned NumPreArgs = PreArgs.size();
@@ -1327,10 +1262,14 @@ CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
setArg(I, nullptr);
setDependence(computeDependence(this, PreArgs));
+
+ CallExprBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ if (hasStoredFPFeatures())
+ setStoredFPFeatures(FPFeatures);
}
CallExpr::CallExpr(StmtClass SC, unsigned NumPreArgs, unsigned NumArgs,
- EmptyShell Empty)
+ bool HasFPFeatures, EmptyShell Empty)
: Expr(SC, Empty), NumArgs(NumArgs) {
CallExprBits.NumPreArgs = NumPreArgs;
assert((NumPreArgs == getNumPreArgs()) && "NumPreArgs overflow!");
@@ -1339,19 +1278,21 @@ CallExpr::CallExpr(StmtClass SC, unsigned NumPreArgs, unsigned NumArgs,
CallExprBits.OffsetToTrailingObjects = OffsetToTrailingObjects;
assert((CallExprBits.OffsetToTrailingObjects == OffsetToTrailingObjects) &&
"OffsetToTrailingObjects overflow!");
+ CallExprBits.HasFPFeatures = HasFPFeatures;
}
CallExpr *CallExpr::Create(const ASTContext &Ctx, Expr *Fn,
ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
- SourceLocation RParenLoc, unsigned MinNumArgs,
+ SourceLocation RParenLoc,
+ FPOptionsOverride FPFeatures, unsigned MinNumArgs,
ADLCallKind UsesADL) {
unsigned NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
- unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ unsigned SizeOfTrailingObjects = CallExpr::sizeOfTrailingObjects(
+ /*NumPreArgs=*/0, NumArgs, FPFeatures.requiresTrailingStorage());
void *Mem =
Ctx.Allocate(sizeof(CallExpr) + SizeOfTrailingObjects, alignof(CallExpr));
return new (Mem) CallExpr(CallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
- RParenLoc, MinNumArgs, UsesADL);
+ RParenLoc, FPFeatures, MinNumArgs, UsesADL);
}
CallExpr *CallExpr::CreateTemporary(void *Mem, Expr *Fn, QualType Ty,
@@ -1360,17 +1301,18 @@ CallExpr *CallExpr::CreateTemporary(void *Mem, Expr *Fn, QualType Ty,
assert(!(reinterpret_cast<uintptr_t>(Mem) % alignof(CallExpr)) &&
"Misaligned memory in CallExpr::CreateTemporary!");
return new (Mem) CallExpr(CallExprClass, Fn, /*PreArgs=*/{}, /*Args=*/{}, Ty,
- VK, RParenLoc,
+ VK, RParenLoc, FPOptionsOverride(),
/*MinNumArgs=*/0, UsesADL);
}
CallExpr *CallExpr::CreateEmpty(const ASTContext &Ctx, unsigned NumArgs,
- EmptyShell Empty) {
+ bool HasFPFeatures, EmptyShell Empty) {
unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs, HasFPFeatures);
void *Mem =
Ctx.Allocate(sizeof(CallExpr) + SizeOfTrailingObjects, alignof(CallExpr));
- return new (Mem) CallExpr(CallExprClass, /*NumPreArgs=*/0, NumArgs, Empty);
+ return new (Mem)
+ CallExpr(CallExprClass, /*NumPreArgs=*/0, NumArgs, HasFPFeatures, Empty);
}
unsigned CallExpr::offsetToTrailingObjects(StmtClass SC) {
@@ -1636,6 +1578,11 @@ MemberExpr *MemberExpr::CreateEmpty(const ASTContext &Context,
return new (Mem) MemberExpr(EmptyShell());
}
+void MemberExpr::setMemberDecl(ValueDecl *D) {
+ MemberDecl = D;
+ setDependence(computeDependence(this));
+}
+
SourceLocation MemberExpr::getBeginLoc() const {
if (isImplicitAccess()) {
if (hasQualifier())
@@ -1756,6 +1703,8 @@ bool CastExpr::CastConsistency() const {
case CK_ARCExtendBlockObject:
case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
@@ -1817,7 +1766,7 @@ Expr *CastExpr::getSubExprAsWritten() {
// subexpression describing the call; strip it off.
if (E->getCastKind() == CK_ConstructorConversion)
SubExpr =
- skipImplicitTemporary(cast<CXXConstructExpr>(SubExpr)->getArg(0));
+ skipImplicitTemporary(cast<CXXConstructExpr>(SubExpr->IgnoreImplicit())->getArg(0));
else if (E->getCastKind() == CK_UserDefinedConversion) {
assert((isa<CXXMemberCallExpr>(SubExpr) ||
isa<BlockExpr>(SubExpr)) &&
@@ -1884,19 +1833,42 @@ const FieldDecl *CastExpr::getTargetFieldForToUnionCast(const RecordDecl *RD,
return nullptr;
}
+FPOptionsOverride *CastExpr::getTrailingFPFeatures() {
+ assert(hasStoredFPFeatures());
+ switch (getStmtClass()) {
+ case ImplicitCastExprClass:
+ return static_cast<ImplicitCastExpr *>(this)
+ ->getTrailingObjects<FPOptionsOverride>();
+ case CStyleCastExprClass:
+ return static_cast<CStyleCastExpr *>(this)
+ ->getTrailingObjects<FPOptionsOverride>();
+ case CXXFunctionalCastExprClass:
+ return static_cast<CXXFunctionalCastExpr *>(this)
+ ->getTrailingObjects<FPOptionsOverride>();
+ case CXXStaticCastExprClass:
+ return static_cast<CXXStaticCastExpr *>(this)
+ ->getTrailingObjects<FPOptionsOverride>();
+ default:
+ llvm_unreachable("Cast does not have FPFeatures");
+ }
+}
+
ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
CastKind Kind, Expr *Operand,
const CXXCastPath *BasePath,
- ExprValueKind VK) {
+ ExprValueKind VK,
+ FPOptionsOverride FPO) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, FPO.requiresTrailingStorage()));
// Per C++ [conv.lval]p3, lvalue-to-rvalue conversions on class and
// std::nullptr_t have special semantics not captured by CK_LValueToRValue.
assert((Kind != CK_LValueToRValue ||
!(T->isNullPtrType() || T->getAsCXXRecordDecl())) &&
"invalid type for lvalue-to-rvalue conversion");
ImplicitCastExpr *E =
- new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
+ new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, FPO, VK);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
@@ -1904,21 +1876,26 @@ ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
}
ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(const ASTContext &C,
- unsigned PathSize) {
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
+ unsigned PathSize,
+ bool HasFPFeatures) {
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, HasFPFeatures));
+ return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize, HasFPFeatures);
}
-
CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
ExprValueKind VK, CastKind K, Expr *Op,
const CXXCastPath *BasePath,
+ FPOptionsOverride FPO,
TypeSourceInfo *WrittenTy,
SourceLocation L, SourceLocation R) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, FPO.requiresTrailingStorage()));
CStyleCastExpr *E =
- new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
+ new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, FPO, WrittenTy, L, R);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
@@ -1926,9 +1903,12 @@ CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
}
CStyleCastExpr *CStyleCastExpr::CreateEmpty(const ASTContext &C,
- unsigned PathSize) {
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
+ unsigned PathSize,
+ bool HasFPFeatures) {
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, HasFPFeatures));
+ return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize, HasFPFeatures);
}
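Every Create overload in this group gains a CreateEmpty twin taking HasFPFeatures because a deserializer must reproduce the writer's allocation exactly: both PathSize and the FP flag feed totalSizeToAlloc. A hedged sketch of the reader side; readPathSize and readHasFPFeatures are hypothetical stand-ins for the real AST-reader record fields:

// Hypothetical deserializer reads, for illustration only.
unsigned readPathSize();
bool readHasFPFeatures();

CStyleCastExpr *rebuildCast(ASTContext &C) {
  unsigned PathSize = readPathSize();
  bool HasFP = readHasFPFeatures();
  // These must match the values recorded at serialization time, or the
  // node is allocated with the wrong amount of trailing storage.
  CStyleCastExpr *E = CStyleCastExpr::CreateEmpty(C, PathSize, HasFP);
  // ... the reader then fills in the operand, written type, and locations.
  return E;
}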
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
@@ -2651,6 +2631,8 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
// Otherwise, the result of the cast is unused.
if (CE->getCastKind() == CK_ConstructorConversion)
return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ if (CE->getCastKind() == CK_Dependent)
+ return false;
WarnE = this;
if (const CXXFunctionalCastExpr *CXXCE =
@@ -2770,162 +2752,8 @@ QualType Expr::findBoundMemberType(const Expr *expr) {
return QualType();
}
-static Expr *IgnoreImpCastsSingleStep(Expr *E) {
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
- return ICE->getSubExpr();
-
- if (auto *FE = dyn_cast<FullExpr>(E))
- return FE->getSubExpr();
-
- return E;
-}
-
-static Expr *IgnoreImpCastsExtraSingleStep(Expr *E) {
- // FIXME: Skip MaterializeTemporaryExpr and SubstNonTypeTemplateParmExpr in
- // addition to what IgnoreImpCasts() skips to account for the current
- // behaviour of IgnoreParenImpCasts().
- Expr *SubE = IgnoreImpCastsSingleStep(E);
- if (SubE != E)
- return SubE;
-
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
- return MTE->getSubExpr();
-
- if (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
- return NTTP->getReplacement();
-
- return E;
-}
-
-static Expr *IgnoreCastsSingleStep(Expr *E) {
- if (auto *CE = dyn_cast<CastExpr>(E))
- return CE->getSubExpr();
-
- if (auto *FE = dyn_cast<FullExpr>(E))
- return FE->getSubExpr();
-
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
- return MTE->getSubExpr();
-
- if (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
- return NTTP->getReplacement();
-
- return E;
-}
-
-static Expr *IgnoreLValueCastsSingleStep(Expr *E) {
- // Skip what IgnoreCastsSingleStep skips, except that only
- // lvalue-to-rvalue casts are skipped.
- if (auto *CE = dyn_cast<CastExpr>(E))
- if (CE->getCastKind() != CK_LValueToRValue)
- return E;
-
- return IgnoreCastsSingleStep(E);
-}
-
-static Expr *IgnoreBaseCastsSingleStep(Expr *E) {
- if (auto *CE = dyn_cast<CastExpr>(E))
- if (CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp)
- return CE->getSubExpr();
-
- return E;
-}
-
-static Expr *IgnoreImplicitSingleStep(Expr *E) {
- Expr *SubE = IgnoreImpCastsSingleStep(E);
- if (SubE != E)
- return SubE;
-
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
- return MTE->getSubExpr();
-
- if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
- return BTE->getSubExpr();
-
- return E;
-}
-
-static Expr *IgnoreImplicitAsWrittenSingleStep(Expr *E) {
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
- return ICE->getSubExprAsWritten();
-
- return IgnoreImplicitSingleStep(E);
-}
-
-static Expr *IgnoreParensOnlySingleStep(Expr *E) {
- if (auto *PE = dyn_cast<ParenExpr>(E))
- return PE->getSubExpr();
- return E;
-}
-
-static Expr *IgnoreParensSingleStep(Expr *E) {
- if (auto *PE = dyn_cast<ParenExpr>(E))
- return PE->getSubExpr();
-
- if (auto *UO = dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UO_Extension)
- return UO->getSubExpr();
- }
-
- else if (auto *GSE = dyn_cast<GenericSelectionExpr>(E)) {
- if (!GSE->isResultDependent())
- return GSE->getResultExpr();
- }
-
- else if (auto *CE = dyn_cast<ChooseExpr>(E)) {
- if (!CE->isConditionDependent())
- return CE->getChosenSubExpr();
- }
-
- return E;
-}
-
-static Expr *IgnoreNoopCastsSingleStep(const ASTContext &Ctx, Expr *E) {
- if (auto *CE = dyn_cast<CastExpr>(E)) {
- // We ignore integer <-> casts that are of the same width, ptr<->ptr and
- // ptr<->int casts of the same width. We also ignore all identity casts.
- Expr *SubExpr = CE->getSubExpr();
- bool IsIdentityCast =
- Ctx.hasSameUnqualifiedType(E->getType(), SubExpr->getType());
- bool IsSameWidthCast =
- (E->getType()->isPointerType() || E->getType()->isIntegralType(Ctx)) &&
- (SubExpr->getType()->isPointerType() ||
- SubExpr->getType()->isIntegralType(Ctx)) &&
- (Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SubExpr->getType()));
-
- if (IsIdentityCast || IsSameWidthCast)
- return SubExpr;
- }
-
- else if (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
- return NTTP->getReplacement();
-
- return E;
-}
-
-static Expr *IgnoreExprNodesImpl(Expr *E) { return E; }
-template <typename FnTy, typename... FnTys>
-static Expr *IgnoreExprNodesImpl(Expr *E, FnTy &&Fn, FnTys &&... Fns) {
- return IgnoreExprNodesImpl(Fn(E), std::forward<FnTys>(Fns)...);
-}
-
-/// Given an expression E and functions Fn_1,...,Fn_n : Expr * -> Expr *,
-/// Recursively apply each of the functions to E until reaching a fixed point.
-/// Note that a null E is valid; in this case nothing is done.
-template <typename... FnTys>
-static Expr *IgnoreExprNodes(Expr *E, FnTys &&... Fns) {
- Expr *LastE = nullptr;
- while (E != LastE) {
- LastE = E;
- E = IgnoreExprNodesImpl(E, std::forward<FnTys>(Fns)...);
- }
- return E;
-}
-
Expr *Expr::IgnoreImpCasts() {
- return IgnoreExprNodes(this, IgnoreImpCastsSingleStep);
+ return IgnoreExprNodes(this, IgnoreImplicitCastsSingleStep);
}
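The block of static single-step helpers deleted above, along with the IgnoreExprNodesImpl/IgnoreExprNodes driver, now comes from the shared clang/AST/IgnoreExpr.h header (hence the renamed IgnoreImplicitCastsSingleStep here), so other clients can compose the same steps. The driver itself is just a variadic fixed-point loop; a standalone restatement with illustrative names:

#include <utility>

// Apply each single-step function left to right, once.
template <typename NodeT> NodeT *applyStepsOnce(NodeT *E) { return E; }

template <typename NodeT, typename FnTy, typename... FnTys>
NodeT *applyStepsOnce(NodeT *E, FnTy &&Fn, FnTys &&...Fns) {
  return applyStepsOnce(Fn(E), std::forward<FnTys>(Fns)...);
}

// Re-run the steps until no step changes the node. A null input never
// enters the loop body, matching the documented "null E is valid" rule.
template <typename NodeT, typename... FnTys>
NodeT *applyUntilFixedPoint(NodeT *E, FnTys &&...Fns) {
  NodeT *Last = nullptr;
  while (E != Last) {
    Last = E;
    E = applyStepsOnce(E, std::forward<FnTys>(Fns)...);
  }
  return E;
}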
Expr *Expr::IgnoreCasts() {
@@ -2946,14 +2774,14 @@ Expr *Expr::IgnoreParens() {
Expr *Expr::IgnoreParenImpCasts() {
return IgnoreExprNodes(this, IgnoreParensSingleStep,
- IgnoreImpCastsExtraSingleStep);
+ IgnoreImplicitCastsExtraSingleStep);
}
Expr *Expr::IgnoreParenCasts() {
return IgnoreExprNodes(this, IgnoreParensSingleStep, IgnoreCastsSingleStep);
}
-Expr *Expr::IgnoreConversionOperator() {
+Expr *Expr::IgnoreConversionOperatorSingleStep() {
if (auto *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
return MCE->getImplicitObjectArgument();
@@ -2966,58 +2794,77 @@ Expr *Expr::IgnoreParenLValueCasts() {
IgnoreLValueCastsSingleStep);
}
-Expr *Expr::ignoreParenBaseCasts() {
+Expr *Expr::IgnoreParenBaseCasts() {
return IgnoreExprNodes(this, IgnoreParensSingleStep,
IgnoreBaseCastsSingleStep);
}
Expr *Expr::IgnoreParenNoopCasts(const ASTContext &Ctx) {
- return IgnoreExprNodes(this, IgnoreParensSingleStep, [&Ctx](Expr *E) {
- return IgnoreNoopCastsSingleStep(Ctx, E);
- });
+ auto IgnoreNoopCastsSingleStep = [&Ctx](Expr *E) {
+ if (auto *CE = dyn_cast<CastExpr>(E)) {
+      // We ignore integer <-> integer casts of the same width, ptr <-> ptr
+      // and ptr <-> int casts of the same width, and all identity casts.
+ Expr *SubExpr = CE->getSubExpr();
+ bool IsIdentityCast =
+ Ctx.hasSameUnqualifiedType(E->getType(), SubExpr->getType());
+ bool IsSameWidthCast = (E->getType()->isPointerType() ||
+ E->getType()->isIntegralType(Ctx)) &&
+ (SubExpr->getType()->isPointerType() ||
+ SubExpr->getType()->isIntegralType(Ctx)) &&
+ (Ctx.getTypeSize(E->getType()) ==
+ Ctx.getTypeSize(SubExpr->getType()));
+
+ if (IsIdentityCast || IsSameWidthCast)
+ return SubExpr;
+ } else if (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ return NTTP->getReplacement();
+
+ return E;
+ };
+ return IgnoreExprNodes(this, IgnoreParensSingleStep,
+ IgnoreNoopCastsSingleStep);
}
Expr *Expr::IgnoreUnlessSpelledInSource() {
- Expr *E = this;
-
- Expr *LastE = nullptr;
- while (E != LastE) {
- LastE = E;
- E = IgnoreExprNodes(E, IgnoreImplicitSingleStep,
- IgnoreImpCastsExtraSingleStep,
- IgnoreParensOnlySingleStep);
-
- auto SR = E->getSourceRange();
+ auto IgnoreImplicitConstructorSingleStep = [](Expr *E) {
+ if (auto *Cast = dyn_cast<CXXFunctionalCastExpr>(E)) {
+ auto *SE = Cast->getSubExpr();
+ if (SE->getSourceRange() == E->getSourceRange())
+ return SE;
+ }
if (auto *C = dyn_cast<CXXConstructExpr>(E)) {
auto NumArgs = C->getNumArgs();
if (NumArgs == 1 ||
(NumArgs > 1 && isa<CXXDefaultArgExpr>(C->getArg(1)))) {
Expr *A = C->getArg(0);
- if (A->getSourceRange() == SR || !isa<CXXTemporaryObjectExpr>(C))
- E = A;
+ if (A->getSourceRange() == E->getSourceRange() || C->isElidable())
+ return A;
}
}
-
+ return E;
+ };
+ auto IgnoreImplicitMemberCallSingleStep = [](Expr *E) {
if (auto *C = dyn_cast<CXXMemberCallExpr>(E)) {
Expr *ExprNode = C->getImplicitObjectArgument();
- if (ExprNode->getSourceRange() == SR) {
- E = ExprNode;
- continue;
+ if (ExprNode->getSourceRange() == E->getSourceRange()) {
+ return ExprNode;
}
if (auto *PE = dyn_cast<ParenExpr>(ExprNode)) {
if (PE->getSourceRange() == C->getSourceRange()) {
- E = PE;
- continue;
+ return cast<Expr>(PE);
}
}
ExprNode = ExprNode->IgnoreParenImpCasts();
- if (ExprNode->getSourceRange() == SR)
- E = ExprNode;
+ if (ExprNode->getSourceRange() == E->getSourceRange())
+ return ExprNode;
}
- }
-
- return E;
+ return E;
+ };
+ return IgnoreExprNodes(
+ this, IgnoreImplicitSingleStep, IgnoreImplicitCastsExtraSingleStep,
+ IgnoreParensOnlySingleStep, IgnoreImplicitConstructorSingleStep,
+ IgnoreImplicitMemberCallSingleStep);
}
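Folding the old hand-rolled loop into two more single-step lambdas lets IgnoreUnlessSpelledInSource reuse the same IgnoreExprNodes driver as every other Ignore* entry point. A rough illustration (assumed AST shape, not taken from the patch) of what the two lambdas strip:

struct S { S(int); };
S s = S(1);
// The initializer of 's' looks roughly like:
//   CXXConstructExpr              <- elidable copy: the constructor step
//   `-CXXFunctionalCastExpr       <-   takes its argument; the cast shares
//     `-CXXConstructExpr 'S(1)'   <-   its operand's source range and is
//       `-IntegerLiteral '1'      <-   stripped too, leaving what was spelled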
bool Expr::isDefaultArgument() const {
@@ -3397,9 +3244,6 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
if (!IncludePossibleEffects && getExprLoc().isMacroID())
return false;
- if (isInstantiationDependent())
- return IncludePossibleEffects;
-
switch (getStmtClass()) {
case NoStmtClass:
#define ABSTRACT_STMT(Type)
@@ -3419,7 +3263,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case TypoExprClass:
case RecoveryExprClass:
case CXXFoldExprClass:
- llvm_unreachable("shouldn't see dependent / unresolved nodes here");
+ // Make a conservative assumption for dependent nodes.
+ return IncludePossibleEffects;
case DeclRefExprClass:
case ObjCIvarRefExprClass:
@@ -3676,6 +3521,18 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
return false;
}
+FPOptions Expr::getFPFeaturesInEffect(const LangOptions &LO) const {
+ if (auto Call = dyn_cast<CallExpr>(this))
+ return Call->getFPFeaturesInEffect(LO);
+ if (auto UO = dyn_cast<UnaryOperator>(this))
+ return UO->getFPFeaturesInEffect(LO);
+ if (auto BO = dyn_cast<BinaryOperator>(this))
+ return BO->getFPFeaturesInEffect(LO);
+ if (auto Cast = dyn_cast<CastExpr>(this))
+ return Cast->getFPFeaturesInEffect(LO);
+ return FPOptions::defaultWithoutTrailingStorage(LO);
+}
+
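getFPFeaturesInEffect gives callers a single entry point for the floating-point environment of any expression. A hedged usage sketch; applyOverrides is the existing FPOptionsOverride API for layering a stored override over the command-line defaults:

#include "clang/AST/Expr.h"
using namespace clang;

// Query any expression; the default is returned when the node carries no
// '#pragma float_control' / FP-contract override.
FPOptions effectiveFP(const Expr *E, const LangOptions &LO) {
  return E->getFPFeaturesInEffect(LO);
}

// Sketch of how a stored override combines with the defaults.
FPOptions mergeOverride(const LangOptions &LO, FPOptionsOverride FPO) {
  FPOptions Defaults = FPOptions::defaultWithoutTrailingStorage(LO);
  return FPO.applyOverrides(Defaults); // pragma-level settings win
}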
namespace {
/// Look for a call to a non-trivial function within an expression.
class NonTrivialCallFinder : public ConstEvaluatedExprVisitor<NonTrivialCallFinder>
@@ -3740,6 +3597,9 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
NullPointerConstantValueDependence NPC) const {
if (isValueDependent() &&
(!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MSVCCompat)) {
+    // An error-dependent expression should never be a null pointer constant.
+ if (containsErrors())
+ return NPCK_NotNull;
switch (NPC) {
case NPC_NeverValueDependent:
llvm_unreachable("Unexpected value dependent expression!");
@@ -3841,7 +3701,7 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(this);
if (Lit && !Lit->getValue())
return NPCK_ZeroLiteral;
- else if (!Ctx.getLangOpts().MSVCCompat || !isCXX98IntegralConstantExpr(Ctx))
+ if (!Ctx.getLangOpts().MSVCCompat || !isCXX98IntegralConstantExpr(Ctx))
return NPCK_NotNull;
} else {
// If we have an integer constant expression, we need to *evaluate* it and
@@ -4270,9 +4130,8 @@ GenericSelectionExpr::CreateEmpty(const ASTContext &Context,
IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
assert(Kind == FieldDesignator && "Only valid on a field designator");
if (Field.NameOrField & 0x01)
- return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
- else
- return getField()->getIdentifier();
+ return reinterpret_cast<IdentifierInfo *>(Field.NameOrField & ~0x01);
+ return getField()->getIdentifier();
}
DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
@@ -4350,14 +4209,10 @@ SourceLocation DesignatedInitExpr::getBeginLoc() const {
SourceLocation StartLoc;
auto *DIE = const_cast<DesignatedInitExpr *>(this);
Designator &First = *DIE->getDesignator(0);
- if (First.isFieldDesignator()) {
- if (GNUSyntax)
- StartLoc = SourceLocation::getFromRawEncoding(First.Field.FieldLoc);
- else
- StartLoc = SourceLocation::getFromRawEncoding(First.Field.DotLoc);
- } else
- StartLoc =
- SourceLocation::getFromRawEncoding(First.ArrayOrRange.LBracketLoc);
+ if (First.isFieldDesignator())
+ StartLoc = GNUSyntax ? First.Field.FieldLoc : First.Field.DotLoc;
+ else
+ StartLoc = First.ArrayOrRange.LBracketLoc;
return StartLoc;
}
@@ -4394,7 +4249,8 @@ void DesignatedInitExpr::ExpandDesignator(const ASTContext &C, unsigned Idx,
Designators + Idx);
--NumNewDesignators;
return;
- } else if (NumNewDesignators == 1) {
+ }
+ if (NumNewDesignators == 1) {
Designators[Idx] = *First;
return;
}
@@ -4477,8 +4333,8 @@ BinaryOperator::BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
BinaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
- if (BinaryOperatorBits.HasFPFeatures)
- *getTrailingFPFeatures() = FPFeatures;
+ if (hasStoredFPFeatures())
+ setStoredFPFeatures(FPFeatures);
setDependence(computeDependence(this));
}
@@ -4494,8 +4350,8 @@ BinaryOperator::BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
BinaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
- if (BinaryOperatorBits.HasFPFeatures)
- *getTrailingFPFeatures() = FPFeatures;
+ if (hasStoredFPFeatures())
+ setStoredFPFeatures(FPFeatures);
setDependence(computeDependence(this));
}
@@ -4559,7 +4415,9 @@ UnaryOperator::UnaryOperator(const ASTContext &Ctx, Expr *input, Opcode opc,
UnaryOperatorBits.CanOverflow = CanOverflow;
UnaryOperatorBits.Loc = l;
UnaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
- setDependence(computeDependence(this));
+ if (hasStoredFPFeatures())
+ setStoredFPFeatures(FPFeatures);
+ setDependence(computeDependence(this, Ctx));
}
UnaryOperator *UnaryOperator::Create(const ASTContext &C, Expr *input,
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index 5d99f61c579f..8dc9d4296e14 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -146,6 +146,18 @@ bool CXXTypeidExpr::isPotentiallyEvaluated() const {
return false;
}
+bool CXXTypeidExpr::isMostDerived(ASTContext &Context) const {
+ assert(!isTypeOperand() && "Cannot call isMostDerived for typeid(type)");
+ const Expr *E = getExprOperand()->IgnoreParenNoopCasts(Context);
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ QualType Ty = DRE->getDecl()->getType();
+ if (!Ty->isPointerType() && !Ty->isReferenceType())
+ return true;
+ }
+
+ return false;
+}
+
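isMostDerived lets RTTI lowering skip the vtable load when the operand's dynamic type is statically known. A source-level illustration (assumed, not part of the patch):

#include <typeinfo>

struct Base { virtual ~Base() = default; };
struct Derived : Base {};

const std::type_info &demo(Derived d, Base *p) {
  (void)typeid(d);   // DeclRefExpr of non-pointer, non-reference type:
                     // isMostDerived() is true, no runtime lookup needed
  return typeid(*p); // '*p' may denote any most-derived object: false
}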
QualType CXXTypeidExpr::getTypeOperand(ASTContext &Context) const {
assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
Qualifiers Quals;
@@ -528,17 +540,18 @@ CXXOperatorCallExpr::CXXOperatorCallExpr(OverloadedOperatorKind OpKind,
FPOptionsOverride FPFeatures,
ADLCallKind UsesADL)
: CallExpr(CXXOperatorCallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
- OperatorLoc, /*MinNumArgs=*/0, UsesADL) {
+ OperatorLoc, FPFeatures, /*MinNumArgs=*/0, UsesADL) {
CXXOperatorCallExprBits.OperatorKind = OpKind;
assert(
(CXXOperatorCallExprBits.OperatorKind == static_cast<unsigned>(OpKind)) &&
"OperatorKind overflow!");
Range = getSourceRangeImpl();
- Overrides = FPFeatures;
}
-CXXOperatorCallExpr::CXXOperatorCallExpr(unsigned NumArgs, EmptyShell Empty)
- : CallExpr(CXXOperatorCallExprClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
+CXXOperatorCallExpr::CXXOperatorCallExpr(unsigned NumArgs, bool HasFPFeatures,
+ EmptyShell Empty)
+ : CallExpr(CXXOperatorCallExprClass, /*NumPreArgs=*/0, NumArgs,
+ HasFPFeatures, Empty) {}
CXXOperatorCallExpr *
CXXOperatorCallExpr::Create(const ASTContext &Ctx,
@@ -548,8 +561,8 @@ CXXOperatorCallExpr::Create(const ASTContext &Ctx,
FPOptionsOverride FPFeatures, ADLCallKind UsesADL) {
// Allocate storage for the trailing objects of CallExpr.
unsigned NumArgs = Args.size();
- unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ unsigned SizeOfTrailingObjects = CallExpr::sizeOfTrailingObjects(
+ /*NumPreArgs=*/0, NumArgs, FPFeatures.requiresTrailingStorage());
void *Mem = Ctx.Allocate(sizeof(CXXOperatorCallExpr) + SizeOfTrailingObjects,
alignof(CXXOperatorCallExpr));
return new (Mem) CXXOperatorCallExpr(OpKind, Fn, Args, Ty, VK, OperatorLoc,
@@ -558,13 +571,14 @@ CXXOperatorCallExpr::Create(const ASTContext &Ctx,
CXXOperatorCallExpr *CXXOperatorCallExpr::CreateEmpty(const ASTContext &Ctx,
unsigned NumArgs,
+ bool HasFPFeatures,
EmptyShell Empty) {
// Allocate storage for the trailing objects of CallExpr.
unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs, HasFPFeatures);
void *Mem = Ctx.Allocate(sizeof(CXXOperatorCallExpr) + SizeOfTrailingObjects,
alignof(CXXOperatorCallExpr));
- return new (Mem) CXXOperatorCallExpr(NumArgs, Empty);
+ return new (Mem) CXXOperatorCallExpr(NumArgs, HasFPFeatures, Empty);
}
SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
@@ -593,36 +607,43 @@ SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
CXXMemberCallExpr::CXXMemberCallExpr(Expr *Fn, ArrayRef<Expr *> Args,
QualType Ty, ExprValueKind VK,
- SourceLocation RP, unsigned MinNumArgs)
+ SourceLocation RP,
+ FPOptionsOverride FPOptions,
+ unsigned MinNumArgs)
: CallExpr(CXXMemberCallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK, RP,
- MinNumArgs, NotADL) {}
+ FPOptions, MinNumArgs, NotADL) {}
-CXXMemberCallExpr::CXXMemberCallExpr(unsigned NumArgs, EmptyShell Empty)
- : CallExpr(CXXMemberCallExprClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
+CXXMemberCallExpr::CXXMemberCallExpr(unsigned NumArgs, bool HasFPFeatures,
+ EmptyShell Empty)
+ : CallExpr(CXXMemberCallExprClass, /*NumPreArgs=*/0, NumArgs, HasFPFeatures,
+ Empty) {}
CXXMemberCallExpr *CXXMemberCallExpr::Create(const ASTContext &Ctx, Expr *Fn,
ArrayRef<Expr *> Args, QualType Ty,
ExprValueKind VK,
SourceLocation RP,
+ FPOptionsOverride FPFeatures,
unsigned MinNumArgs) {
// Allocate storage for the trailing objects of CallExpr.
unsigned NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
- unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ unsigned SizeOfTrailingObjects = CallExpr::sizeOfTrailingObjects(
+ /*NumPreArgs=*/0, NumArgs, FPFeatures.requiresTrailingStorage());
void *Mem = Ctx.Allocate(sizeof(CXXMemberCallExpr) + SizeOfTrailingObjects,
alignof(CXXMemberCallExpr));
- return new (Mem) CXXMemberCallExpr(Fn, Args, Ty, VK, RP, MinNumArgs);
+ return new (Mem)
+ CXXMemberCallExpr(Fn, Args, Ty, VK, RP, FPFeatures, MinNumArgs);
}
CXXMemberCallExpr *CXXMemberCallExpr::CreateEmpty(const ASTContext &Ctx,
unsigned NumArgs,
+ bool HasFPFeatures,
EmptyShell Empty) {
// Allocate storage for the trailing objects of CallExpr.
unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs, HasFPFeatures);
void *Mem = Ctx.Allocate(sizeof(CXXMemberCallExpr) + SizeOfTrailingObjects,
alignof(CXXMemberCallExpr));
- return new (Mem) CXXMemberCallExpr(NumArgs, Empty);
+ return new (Mem) CXXMemberCallExpr(NumArgs, HasFPFeatures, Empty);
}
Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
@@ -681,19 +702,18 @@ const char *CXXNamedCastExpr::getCastName() const {
}
}
-CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
- ExprValueKind VK,
- CastKind K, Expr *Op,
- const CXXCastPath *BasePath,
- TypeSourceInfo *WrittenTy,
- SourceLocation L,
- SourceLocation RParenLoc,
- SourceRange AngleBrackets) {
+CXXStaticCastExpr *
+CXXStaticCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
+ CastKind K, Expr *Op, const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy, FPOptionsOverride FPO,
+ SourceLocation L, SourceLocation RParenLoc,
+ SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- auto *E =
- new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
- RParenLoc, AngleBrackets);
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, FPO.requiresTrailingStorage()));
+ auto *E = new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy,
+ FPO, L, RParenLoc, AngleBrackets);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
@@ -701,9 +721,12 @@ CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
}
CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(const ASTContext &C,
- unsigned PathSize) {
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize);
+ unsigned PathSize,
+ bool HasFPFeatures) {
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, HasFPFeatures));
+ return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize, HasFPFeatures);
}
CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T,
@@ -814,25 +837,30 @@ CXXAddrspaceCastExpr *CXXAddrspaceCastExpr::CreateEmpty(const ASTContext &C) {
return new (C) CXXAddrspaceCastExpr(EmptyShell());
}
-CXXFunctionalCastExpr *
-CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
- TypeSourceInfo *Written, CastKind K, Expr *Op,
- const CXXCastPath *BasePath,
- SourceLocation L, SourceLocation R) {
+CXXFunctionalCastExpr *CXXFunctionalCastExpr::Create(
+ const ASTContext &C, QualType T, ExprValueKind VK, TypeSourceInfo *Written,
+ CastKind K, Expr *Op, const CXXCastPath *BasePath, FPOptionsOverride FPO,
+ SourceLocation L, SourceLocation R) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- auto *E =
- new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R);
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, FPO.requiresTrailingStorage()));
+ auto *E = new (Buffer)
+ CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, FPO, L, R);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
return E;
}
-CXXFunctionalCastExpr *
-CXXFunctionalCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
- void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
+CXXFunctionalCastExpr *CXXFunctionalCastExpr::CreateEmpty(const ASTContext &C,
+ unsigned PathSize,
+ bool HasFPFeatures) {
+ void *Buffer =
+ C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *, FPOptionsOverride>(
+ PathSize, HasFPFeatures));
+ return new (Buffer)
+ CXXFunctionalCastExpr(EmptyShell(), PathSize, HasFPFeatures);
}
SourceLocation CXXFunctionalCastExpr::getBeginLoc() const {
@@ -846,37 +874,43 @@ SourceLocation CXXFunctionalCastExpr::getEndLoc() const {
UserDefinedLiteral::UserDefinedLiteral(Expr *Fn, ArrayRef<Expr *> Args,
QualType Ty, ExprValueKind VK,
SourceLocation LitEndLoc,
- SourceLocation SuffixLoc)
+ SourceLocation SuffixLoc,
+ FPOptionsOverride FPFeatures)
: CallExpr(UserDefinedLiteralClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
- LitEndLoc, /*MinNumArgs=*/0, NotADL),
+ LitEndLoc, FPFeatures, /*MinNumArgs=*/0, NotADL),
UDSuffixLoc(SuffixLoc) {}
-UserDefinedLiteral::UserDefinedLiteral(unsigned NumArgs, EmptyShell Empty)
- : CallExpr(UserDefinedLiteralClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
+UserDefinedLiteral::UserDefinedLiteral(unsigned NumArgs, bool HasFPFeatures,
+ EmptyShell Empty)
+ : CallExpr(UserDefinedLiteralClass, /*NumPreArgs=*/0, NumArgs,
+ HasFPFeatures, Empty) {}
UserDefinedLiteral *UserDefinedLiteral::Create(const ASTContext &Ctx, Expr *Fn,
ArrayRef<Expr *> Args,
QualType Ty, ExprValueKind VK,
SourceLocation LitEndLoc,
- SourceLocation SuffixLoc) {
+ SourceLocation SuffixLoc,
+ FPOptionsOverride FPFeatures) {
// Allocate storage for the trailing objects of CallExpr.
unsigned NumArgs = Args.size();
- unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ unsigned SizeOfTrailingObjects = CallExpr::sizeOfTrailingObjects(
+ /*NumPreArgs=*/0, NumArgs, FPFeatures.requiresTrailingStorage());
void *Mem = Ctx.Allocate(sizeof(UserDefinedLiteral) + SizeOfTrailingObjects,
alignof(UserDefinedLiteral));
- return new (Mem) UserDefinedLiteral(Fn, Args, Ty, VK, LitEndLoc, SuffixLoc);
+ return new (Mem)
+ UserDefinedLiteral(Fn, Args, Ty, VK, LitEndLoc, SuffixLoc, FPFeatures);
}
UserDefinedLiteral *UserDefinedLiteral::CreateEmpty(const ASTContext &Ctx,
unsigned NumArgs,
+ bool HasFPOptions,
EmptyShell Empty) {
// Allocate storage for the trailing objects of CallExpr.
unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs, HasFPOptions);
void *Mem = Ctx.Allocate(sizeof(UserDefinedLiteral) + SizeOfTrailingObjects,
alignof(UserDefinedLiteral));
- return new (Mem) UserDefinedLiteral(NumArgs, Empty);
+ return new (Mem) UserDefinedLiteral(NumArgs, HasFPOptions, Empty);
}
UserDefinedLiteral::LiteralOperatorKind
@@ -925,7 +959,7 @@ CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx,
CXXDefaultInitExprBits.Loc = Loc;
assert(Field->hasInClassInitializer());
- setDependence(ExprDependence::None);
+ setDependence(computeDependence(this));
}
CXXTemporary *CXXTemporary::Create(const ASTContext &C,
@@ -1237,6 +1271,10 @@ ArrayRef<NamedDecl *> LambdaExpr::getExplicitTemplateParameters() const {
return Record->getLambdaExplicitTemplateParameters();
}
+Expr *LambdaExpr::getTrailingRequiresClause() const {
+ return getCallOperator()->getTrailingRequiresClause();
+}
+
bool LambdaExpr::isMutable() const { return !getCallOperator()->isConst(); }
LambdaExpr::child_range LambdaExpr::children() {
@@ -1282,12 +1320,12 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
return new (buffer) ExprWithCleanups(empty, numObjects);
}
-CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *TSI,
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T,
+ TypeSourceInfo *TSI,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc)
- : Expr(CXXUnresolvedConstructExprClass,
- TSI->getType().getNonReferenceType(),
+ : Expr(CXXUnresolvedConstructExprClass, T,
(TSI->getType()->isLValueReferenceType()
? VK_LValue
: TSI->getType()->isRValueReferenceType() ? VK_XValue
@@ -1302,10 +1340,11 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *TSI,
}
CXXUnresolvedConstructExpr *CXXUnresolvedConstructExpr::Create(
- const ASTContext &Context, TypeSourceInfo *TSI, SourceLocation LParenLoc,
+ const ASTContext &Context, QualType T, TypeSourceInfo *TSI, SourceLocation LParenLoc,
ArrayRef<Expr *> Args, SourceLocation RParenLoc) {
void *Mem = Context.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
- return new (Mem) CXXUnresolvedConstructExpr(TSI, LParenLoc, Args, RParenLoc);
+ return new (Mem)
+ CXXUnresolvedConstructExpr(T, TSI, LParenLoc, Args, RParenLoc);
}
CXXUnresolvedConstructExpr *
@@ -1530,6 +1569,15 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
}
+QualType SubstNonTypeTemplateParmExpr::getParameterType(
+ const ASTContext &Context) const {
+ // Note that, for a class type NTTP, we will have an lvalue of type 'const
+ // T', so we can't just compute this from the type and value category.
+ if (isReferenceParameter())
+ return Context.getLValueReferenceType(getType());
+ return getType().getUnqualifiedType();
+}
+
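The comment above is easiest to see with a concrete C++20 example (assumed, not from the patch): for a class-type non-type template parameter the substituted expression is an lvalue of type 'const S', so the declared parameter type must be recomputed rather than read off the expression:

struct S { int v; };
template <S s> int byValue() { return s.v; }      // getParameterType(): 'S'
template <const S &s> int byRef() { return s.v; } // getParameterType(): 'const S &'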
SubstNonTypeTemplateParmPackExpr::SubstNonTypeTemplateParmPackExpr(
QualType T, ExprValueKind ValueKind, NonTypeTemplateParmDecl *Param,
SourceLocation NameLoc, const TemplateArgument &ArgPack)
@@ -1603,6 +1651,20 @@ void MaterializeTemporaryExpr::setExtendingDecl(ValueDecl *ExtendedBy,
ES->ManglingNumber = ManglingNumber;
}
+bool MaterializeTemporaryExpr::isUsableInConstantExpressions(
+ const ASTContext &Context) const {
+ // C++20 [expr.const]p4:
+ // An object or reference is usable in constant expressions if it is [...]
+ // a temporary object of non-volatile const-qualified literal type
+ // whose lifetime is extended to that of a variable that is usable
+ // in constant expressions
+ auto *VD = dyn_cast_or_null<VarDecl>(getExtendingDecl());
+ return VD && getType().isConstant(Context) &&
+ !getType().isVolatileQualified() &&
+ getType()->isLiteralType(Context) &&
+ VD->isUsableInConstantExpressions(Context);
+}
+
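The C++20 wording quoted above, in source form (illustrative):

// The temporary bound to 'r' is const, non-volatile, of literal type, and
// lifetime-extended by a constexpr variable, so every check in
// isUsableInConstantExpressions is satisfied.
constexpr const int &r = 42;
static_assert(r == 42, "usable in constant expressions");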
TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc, bool Value)
@@ -1643,34 +1705,39 @@ TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
CUDAKernelCallExpr::CUDAKernelCallExpr(Expr *Fn, CallExpr *Config,
ArrayRef<Expr *> Args, QualType Ty,
ExprValueKind VK, SourceLocation RP,
+ FPOptionsOverride FPFeatures,
unsigned MinNumArgs)
: CallExpr(CUDAKernelCallExprClass, Fn, /*PreArgs=*/Config, Args, Ty, VK,
- RP, MinNumArgs, NotADL) {}
+ RP, FPFeatures, MinNumArgs, NotADL) {}
-CUDAKernelCallExpr::CUDAKernelCallExpr(unsigned NumArgs, EmptyShell Empty)
+CUDAKernelCallExpr::CUDAKernelCallExpr(unsigned NumArgs, bool HasFPFeatures,
+ EmptyShell Empty)
: CallExpr(CUDAKernelCallExprClass, /*NumPreArgs=*/END_PREARG, NumArgs,
- Empty) {}
+ HasFPFeatures, Empty) {}
CUDAKernelCallExpr *
CUDAKernelCallExpr::Create(const ASTContext &Ctx, Expr *Fn, CallExpr *Config,
ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
- SourceLocation RP, unsigned MinNumArgs) {
+ SourceLocation RP, FPOptionsOverride FPFeatures,
+ unsigned MinNumArgs) {
// Allocate storage for the trailing objects of CallExpr.
unsigned NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
- unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/END_PREARG, NumArgs);
+ unsigned SizeOfTrailingObjects = CallExpr::sizeOfTrailingObjects(
+ /*NumPreArgs=*/END_PREARG, NumArgs, FPFeatures.requiresTrailingStorage());
void *Mem = Ctx.Allocate(sizeof(CUDAKernelCallExpr) + SizeOfTrailingObjects,
alignof(CUDAKernelCallExpr));
- return new (Mem) CUDAKernelCallExpr(Fn, Config, Args, Ty, VK, RP, MinNumArgs);
+ return new (Mem)
+ CUDAKernelCallExpr(Fn, Config, Args, Ty, VK, RP, FPFeatures, MinNumArgs);
}
CUDAKernelCallExpr *CUDAKernelCallExpr::CreateEmpty(const ASTContext &Ctx,
unsigned NumArgs,
+ bool HasFPFeatures,
EmptyShell Empty) {
// Allocate storage for the trailing objects of CallExpr.
- unsigned SizeOfTrailingObjects =
- CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/END_PREARG, NumArgs);
+ unsigned SizeOfTrailingObjects = CallExpr::sizeOfTrailingObjects(
+ /*NumPreArgs=*/END_PREARG, NumArgs, HasFPFeatures);
void *Mem = Ctx.Allocate(sizeof(CUDAKernelCallExpr) + SizeOfTrailingObjects,
alignof(CUDAKernelCallExpr));
- return new (Mem) CUDAKernelCallExpr(NumArgs, Empty);
+ return new (Mem) CUDAKernelCallExpr(NumArgs, HasFPFeatures, Empty);
}
diff --git a/clang/lib/AST/ExprClassification.cpp b/clang/lib/AST/ExprClassification.cpp
index 31aa734ffedb..0286c730ce4e 100644
--- a/clang/lib/AST/ExprClassification.cpp
+++ b/clang/lib/AST/ExprClassification.cpp
@@ -453,12 +453,14 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
bool islvalue;
if (const auto *NTTParm = dyn_cast<NonTypeTemplateParmDecl>(D))
- islvalue = NTTParm->getType()->isReferenceType();
+ islvalue = NTTParm->getType()->isReferenceType() ||
+ NTTParm->getType()->isRecordType();
else
islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
isa<IndirectFieldDecl>(D) ||
isa<BindingDecl>(D) ||
isa<MSGuidDecl>(D) ||
+ isa<TemplateParamObjectDecl>(D) ||
(Ctx.getLangOpts().CPlusPlus &&
(isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
isa<FunctionTemplateDecl>(D)));
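Both additions cover C++20 class-type non-type template parameters: the parameter and the TemplateParamObjectDecl that backs it denote an addressable const object, hence the lvalue classification. A small example (assumed, C++20):

struct S { int v; };
// Valid only because 's' is classified as an lvalue referring to the
// underlying template parameter object.
template <S s> const int *addrOfMember() { return &s.v; }

constexpr S val{42};
const int *p = addrOfMember<val>();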
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index d20c2382b6ac..56181bbe1166 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -50,8 +50,8 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
-#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/Debug.h"
@@ -63,9 +63,11 @@
#define DEBUG_TYPE "exprconstant"
using namespace clang;
+using llvm::APFixedPoint;
using llvm::APInt;
using llvm::APSInt;
using llvm::APFloat;
+using llvm::FixedPointSemantics;
using llvm::Optional;
namespace {
@@ -77,48 +79,7 @@ namespace {
CurrentSourceLocExprScope::SourceLocExprScopeGuard;
static QualType getType(APValue::LValueBase B) {
- if (!B) return QualType();
- if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
- // FIXME: It's unclear where we're supposed to take the type from, and
- // this actually matters for arrays of unknown bound. Eg:
- //
- // extern int arr[]; void f() { extern int arr[3]; };
- // constexpr int *p = &arr[1]; // valid?
- //
- // For now, we take the array bound from the most recent declaration.
- for (auto *Redecl = cast<ValueDecl>(D->getMostRecentDecl()); Redecl;
- Redecl = cast_or_null<ValueDecl>(Redecl->getPreviousDecl())) {
- QualType T = Redecl->getType();
- if (!T->isIncompleteArrayType())
- return T;
- }
- return D->getType();
- }
-
- if (B.is<TypeInfoLValue>())
- return B.getTypeInfoType();
-
- if (B.is<DynamicAllocLValue>())
- return B.getDynamicAllocType();
-
- const Expr *Base = B.get<const Expr*>();
-
- // For a materialized temporary, the type of the temporary we materialized
- // may not be the type of the expression.
- if (const MaterializeTemporaryExpr *MTE =
- dyn_cast<MaterializeTemporaryExpr>(Base)) {
- SmallVector<const Expr *, 2> CommaLHSs;
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- const Expr *Temp = MTE->getSubExpr();
- const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs,
- Adjustments);
- // Keep any cv-qualifiers from the reference if we generated a temporary
- // for it directly. Otherwise use the type after adjustment.
- if (!Adjustments.empty())
- return Inner->getType();
- }
-
- return Base->getType();
+ return B.getType();
}
/// Get an LValue path entry, which is known to not be an array index, as a
@@ -181,6 +142,37 @@ namespace {
return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
}
+ /// Determines whether the given kind of constant expression is only ever
+ /// used for name mangling. If so, it's permitted to reference things that we
+ /// can't generate code for (in particular, dllimported functions).
+ static bool isForManglingOnly(ConstantExprKind Kind) {
+ switch (Kind) {
+ case ConstantExprKind::Normal:
+ case ConstantExprKind::ClassTemplateArgument:
+ case ConstantExprKind::ImmediateInvocation:
+ // Note that non-type template arguments of class type are emitted as
+ // template parameter objects.
+ return false;
+
+ case ConstantExprKind::NonClassTemplateArgument:
+ return true;
+ }
+ llvm_unreachable("unknown ConstantExprKind");
+ }
+
+ static bool isTemplateArgument(ConstantExprKind Kind) {
+ switch (Kind) {
+ case ConstantExprKind::Normal:
+ case ConstantExprKind::ImmediateInvocation:
+ return false;
+
+ case ConstantExprKind::ClassTemplateArgument:
+ case ConstantExprKind::NonClassTemplateArgument:
+ return true;
+ }
+ llvm_unreachable("unknown ConstantExprKind");
+ }
+
/// The bound to claim that an array of unknown bound has.
/// The value in MostDerivedArraySize is undefined in this case. So, set it
/// to an arbitrary value that's likely to loudly break things if it's used.
@@ -488,6 +480,39 @@ namespace {
}
};
+ /// A scope at the end of which an object can need to be destroyed.
+ enum class ScopeKind {
+ Block,
+ FullExpression,
+ Call
+ };
+
+ /// A reference to a particular call and its arguments.
+ struct CallRef {
+ CallRef() : OrigCallee(), CallIndex(0), Version() {}
+ CallRef(const FunctionDecl *Callee, unsigned CallIndex, unsigned Version)
+ : OrigCallee(Callee), CallIndex(CallIndex), Version(Version) {}
+
+ explicit operator bool() const { return OrigCallee; }
+
+ /// Get the parameter that the caller initialized, corresponding to the
+ /// given parameter in the callee.
+ const ParmVarDecl *getOrigParam(const ParmVarDecl *PVD) const {
+ return OrigCallee ? OrigCallee->getParamDecl(PVD->getFunctionScopeIndex())
+ : PVD;
+ }
+
+ /// The callee at the point where the arguments were evaluated. This might
+ /// be different from the actual callee (a different redeclaration, or a
+ /// virtual override), but this function's parameters are the ones that
+ /// appear in the parameter map.
+ const FunctionDecl *OrigCallee;
+ /// The call index of the frame that holds the argument values.
+ unsigned CallIndex;
+ /// The version of the parameters corresponding to this call.
+ unsigned Version;
+ };
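With CallRef, argument APValues stay in the frame of the caller that evaluated them; a callee locates its parameters by (call index, version, original parameter). A hedged sketch, in the style of this file's anonymous-namespace helpers, of a lookup through the getParamSlot accessor added further down:

static APValue *lookupParam(EvalInfo &Info, const CallStackFrame &Callee,
                            const ParmVarDecl *PVD) {
  // A null result means the owning caller frame has already been popped.
  return Info.getParamSlot(Callee.Arguments, PVD);
}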
+
/// A stack frame in the constexpr call stack.
class CallStackFrame : public interp::Frame {
public:
@@ -502,9 +527,10 @@ namespace {
/// This - The binding for the this pointer in this call, if any.
const LValue *This;
- /// Arguments - Parameter bindings for this function call, indexed by
- /// parameters' function scope indices.
- APValue *Arguments;
+ /// Information on how to find the arguments to this call. Our arguments
+ /// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
+ /// key and this value as the version.
+ CallRef Arguments;
/// Source location information about the default argument or default
/// initializer expression we're evaluating, if any.
@@ -537,6 +563,10 @@ namespace {
TempVersionStack.pop_back();
}
+ CallRef createCall(const FunctionDecl *Callee) {
+ return {Callee, Index, ++CurTempVersion};
+ }
+
// FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
// on the overall stack usage of deeply-recursing constexpr evaluations.
// (We should cache this map rather than recomputing it repeatedly.)
@@ -550,7 +580,7 @@ namespace {
CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
- APValue *Arguments);
+ CallRef Arguments);
~CallStackFrame();
// Return the temporary for Key whose version number is Version.
@@ -589,7 +619,10 @@ namespace {
/// bumping the temporary version number.
template<typename KeyT>
APValue &createTemporary(const KeyT *Key, QualType T,
- bool IsLifetimeExtended, LValue &LV);
+ ScopeKind Scope, LValue &LV);
+
+ /// Allocate storage for a parameter of a function call made in this frame.
+ APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
void describe(llvm::raw_ostream &OS) override;
@@ -603,6 +636,10 @@ namespace {
return true;
return false;
}
+
+ private:
+ APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T,
+ ScopeKind Scope);
};
/// Temporarily override 'this'.
@@ -631,16 +668,20 @@ static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
namespace {
/// A cleanup, and a flag indicating whether it is lifetime-extended.
class Cleanup {
- llvm::PointerIntPair<APValue*, 1, bool> Value;
+ llvm::PointerIntPair<APValue*, 2, ScopeKind> Value;
APValue::LValueBase Base;
QualType T;
public:
Cleanup(APValue *Val, APValue::LValueBase Base, QualType T,
- bool IsLifetimeExtended)
- : Value(Val, IsLifetimeExtended), Base(Base), T(T) {}
+ ScopeKind Scope)
+ : Value(Val, Scope), Base(Base), T(T) {}
- bool isLifetimeExtended() const { return Value.getInt(); }
+ /// Determine whether this cleanup should be performed at the end of the
+ /// given kind of scope.
+ bool isDestroyedAtEndOf(ScopeKind K) const {
+ return (int)Value.getInt() >= (int)K;
+ }
bool endLifetime(EvalInfo &Info, bool RunDestructors) {
if (RunDestructors) {
SourceLocation Loc;
@@ -926,7 +967,7 @@ namespace {
CallStackDepth(0), NextCallIndex(1),
StepsLeft(C.getLangOpts().ConstexprStepLimit),
EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
- BottomFrame(*this, SourceLocation(), nullptr, nullptr, nullptr),
+ BottomFrame(*this, SourceLocation(), nullptr, nullptr, CallRef()),
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
HasFoldFailureDiagnostic(false), InConstantContext(false),
@@ -995,6 +1036,13 @@ namespace {
return Result;
}
+ /// Get the allocated storage for the given parameter of the given call.
+ APValue *getParamSlot(CallRef Call, const ParmVarDecl *PVD) {
+ CallStackFrame *Frame = getCallFrameAndDepth(Call.CallIndex).first;
+ return Frame ? Frame->getTemporary(Call.getOrigParam(PVD), Call.Version)
+ : nullptr;
+ }
+
/// Information about a stack frame for std::allocator<T>::[de]allocate.
struct StdAllocatorCaller {
unsigned FrameIndex;
@@ -1030,10 +1078,13 @@ namespace {
void performLifetimeExtension() {
// Disable the cleanups for lifetime-extended temporaries.
- CleanupStack.erase(
- std::remove_if(CleanupStack.begin(), CleanupStack.end(),
- [](Cleanup &C) { return C.isLifetimeExtended(); }),
- CleanupStack.end());
+ CleanupStack.erase(std::remove_if(CleanupStack.begin(),
+ CleanupStack.end(),
+ [](Cleanup &C) {
+ return !C.isDestroyedAtEndOf(
+ ScopeKind::FullExpression);
+ }),
+ CleanupStack.end());
}
/// Throw away any remaining cleanups at the end of evaluation. If any
@@ -1282,7 +1333,7 @@ namespace {
/// RAII object wrapping a full-expression or block scope, and handling
/// the ending of the lifetime of temporaries created within it.
- template<bool IsFullExpression>
+ template<ScopeKind Kind>
class ScopeRAII {
EvalInfo &Info;
unsigned OldStackSize;
@@ -1315,8 +1366,7 @@ namespace {
// for a full-expression scope.
bool Success = true;
for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
- if (!(IsFullExpression &&
- Info.CleanupStack[I - 1].isLifetimeExtended())) {
+ if (Info.CleanupStack[I - 1].isDestroyedAtEndOf(Kind)) {
if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
Success = false;
break;
@@ -1324,18 +1374,20 @@ namespace {
}
}
- // Compact lifetime-extended cleanups.
+ // Compact any retained cleanups.
auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
- if (IsFullExpression)
+ if (Kind != ScopeKind::Block)
NewEnd =
- std::remove_if(NewEnd, Info.CleanupStack.end(),
- [](Cleanup &C) { return !C.isLifetimeExtended(); });
+ std::remove_if(NewEnd, Info.CleanupStack.end(), [](Cleanup &C) {
+ return C.isDestroyedAtEndOf(Kind);
+ });
Info.CleanupStack.erase(NewEnd, Info.CleanupStack.end());
return Success;
}
};
- typedef ScopeRAII<false> BlockScopeRAII;
- typedef ScopeRAII<true> FullExpressionRAII;
+ typedef ScopeRAII<ScopeKind::Block> BlockScopeRAII;
+ typedef ScopeRAII<ScopeKind::FullExpression> FullExpressionRAII;
+ typedef ScopeRAII<ScopeKind::Call> CallScopeRAII;
}
bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
@@ -1378,9 +1430,9 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
- APValue *Arguments)
+ CallRef Call)
: Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
- Arguments(Arguments), CallLoc(CallLoc), Index(Info.NextCallIndex++) {
+ Arguments(Call), CallLoc(CallLoc), Index(Info.NextCallIndex++) {
Info.CurrentCall = this;
++Info.CallStackDepth;
}
@@ -1541,7 +1593,7 @@ namespace {
}
void setNull(ASTContext &Ctx, QualType PointerTy) {
- Base = (Expr *)nullptr;
+ Base = (const ValueDecl *)nullptr;
Offset =
CharUnits::fromQuantity(Ctx.getTargetNullPointerValue(PointerTy));
InvalidBase = false;
@@ -1793,14 +1845,33 @@ static void negateAsSigned(APSInt &Int) {
template<typename KeyT>
APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T,
- bool IsLifetimeExtended, LValue &LV) {
+ ScopeKind Scope, LValue &LV) {
unsigned Version = getTempVersion();
APValue::LValueBase Base(Key, Index, Version);
LV.set(Base);
+ return createLocal(Base, Key, T, Scope);
+}
+
+/// Allocate storage for a parameter of a function call made in this frame.
+APValue &CallStackFrame::createParam(CallRef Args, const ParmVarDecl *PVD,
+ LValue &LV) {
+ assert(Args.CallIndex == Index && "creating parameter in wrong frame");
+ APValue::LValueBase Base(PVD, Index, Args.Version);
+ LV.set(Base);
+ // We always destroy parameters at the end of the call, even if we'd allow
+ // them to live to the end of the full-expression at runtime, in order to
+ // give portable results and match other compilers.
+ return createLocal(Base, PVD, PVD->getType(), ScopeKind::Call);
+}
+
+APValue &CallStackFrame::createLocal(APValue::LValueBase Base, const void *Key,
+ QualType T, ScopeKind Scope) {
+ assert(Base.getCallIndex() == Index && "lvalue for wrong frame");
+ unsigned Version = Base.getVersion();
APValue &Result = Temporaries[MapKeyTy(Key, Version)];
- assert(Result.isAbsent() && "temporary created multiple times");
+ assert(Result.isAbsent() && "local created multiple times");
- // If we're creating a temporary immediately in the operand of a speculative
+ // If we're creating a local immediately in the operand of a speculative
// evaluation, don't register a cleanup to be run outside the speculative
// evaluation context, since we won't actually be able to initialize this
// object.
@@ -1808,7 +1879,7 @@ APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T,
if (T.isDestructedType())
Info.noteSideEffect();
} else {
- Info.CleanupStack.push_back(Cleanup(&Result, Base, T, IsLifetimeExtended));
+ Info.CleanupStack.push_back(Cleanup(&Result, Base, T, Scope));
}
return Result;
}
@@ -1854,8 +1925,11 @@ void CallStackFrame::describe(raw_ostream &Out) {
Out << ", ";
const ParmVarDecl *Param = *I;
- const APValue &Arg = Arguments[ArgIndex];
- Arg.printPretty(Out, Info.Ctx, Param->getType());
+ APValue *V = Info.getParamSlot(Arguments, Param);
+ if (V)
+ V->printPretty(Out, Info.Ctx, Param->getType());
+ else
+ Out << "<...>";
if (ArgIndex == 0 && IsMemberCall)
Out << "->" << *Callee << '(';
@@ -1868,6 +1942,7 @@ void CallStackFrame::describe(raw_ostream &Out) {
/// result.
/// \return \c true if the caller should keep evaluating.
static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
+ assert(!E->isValueDependent());
APValue Scratch;
if (!Evaluate(Scratch, Info, E))
// We don't need the value, but we might have skipped a side effect here.
@@ -1894,6 +1969,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
// ... the address of an object with static storage duration,
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
+ if (isa<TemplateParamObjectDecl>(D))
+ return true;
// ... the address of a function,
// ... the address of a GUID [MS extension],
return isa<FunctionDecl>(D) || isa<MSGuidDecl>(D);
@@ -1976,23 +2053,32 @@ static bool HasSameBase(const LValue &A, const LValue &B) {
return false;
if (A.getLValueBase().getOpaqueValue() !=
- B.getLValueBase().getOpaqueValue()) {
- const Decl *ADecl = GetLValueBaseDecl(A);
- if (!ADecl)
- return false;
- const Decl *BDecl = GetLValueBaseDecl(B);
- if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
- return false;
- }
+ B.getLValueBase().getOpaqueValue())
+ return false;
- return IsGlobalLValue(A.getLValueBase()) ||
- (A.getLValueCallIndex() == B.getLValueCallIndex() &&
- A.getLValueVersion() == B.getLValueVersion());
+ return A.getLValueCallIndex() == B.getLValueCallIndex() &&
+ A.getLValueVersion() == B.getLValueVersion();
}
static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
assert(Base && "no location for a null lvalue");
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+
+ // For a parameter, find the corresponding call stack frame (if it still
+ // exists), and point at the parameter of the function definition we actually
+ // invoked.
+ if (auto *PVD = dyn_cast_or_null<ParmVarDecl>(VD)) {
+ unsigned Idx = PVD->getFunctionScopeIndex();
+ for (CallStackFrame *F = Info.CurrentCall; F; F = F->Caller) {
+ if (F->Arguments.CallIndex == Base.getCallIndex() &&
+ F->Arguments.Version == Base.getVersion() && F->Callee &&
+ Idx < F->Callee->getNumParams()) {
+ VD = F->Callee->getParamDecl(Idx);
+ break;
+ }
+ }
+ }
+
if (VD)
Info.Note(VD->getLocation(), diag::note_declared_at);
else if (const Expr *E = Base.dyn_cast<const Expr*>())
@@ -2019,7 +2105,7 @@ using CheckedTemporaries =
static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
- Expr::ConstExprUsage Usage,
+ ConstantExprKind Kind,
SourceLocation SubobjectLoc,
CheckedTemporaries &CheckedTemps);
@@ -2028,21 +2114,48 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
/// can fold this expression, whether or not it's a constant expression.
static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
QualType Type, const LValue &LVal,
- Expr::ConstExprUsage Usage,
+ ConstantExprKind Kind,
CheckedTemporaries &CheckedTemps) {
bool IsReferenceType = Type->isReferenceType();
APValue::LValueBase Base = LVal.getLValueBase();
const SubobjectDesignator &Designator = LVal.getLValueDesignator();
- if (auto *VD = LVal.getLValueBase().dyn_cast<const ValueDecl *>()) {
- if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
- if (FD->isConsteval()) {
- Info.FFDiag(Loc, diag::note_consteval_address_accessible)
- << !Type->isAnyPointerType();
- Info.Note(FD->getLocation(), diag::note_declared_at);
- return false;
- }
+ const Expr *BaseE = Base.dyn_cast<const Expr *>();
+ const ValueDecl *BaseVD = Base.dyn_cast<const ValueDecl*>();
+
+ // Additional restrictions apply in a template argument. We only enforce the
+ // C++20 restrictions here; additional syntactic and semantic restrictions
+ // are applied elsewhere.
+ if (isTemplateArgument(Kind)) {
+ int InvalidBaseKind = -1;
+ StringRef Ident;
+ if (Base.is<TypeInfoLValue>())
+ InvalidBaseKind = 0;
+ else if (isa_and_nonnull<StringLiteral>(BaseE))
+ InvalidBaseKind = 1;
+ else if (isa_and_nonnull<MaterializeTemporaryExpr>(BaseE) ||
+ isa_and_nonnull<LifetimeExtendedTemporaryDecl>(BaseVD))
+ InvalidBaseKind = 2;
+ else if (auto *PE = dyn_cast_or_null<PredefinedExpr>(BaseE)) {
+ InvalidBaseKind = 3;
+ Ident = PE->getIdentKindName();
+ }
+
+ if (InvalidBaseKind != -1) {
+ Info.FFDiag(Loc, diag::note_constexpr_invalid_template_arg)
+ << IsReferenceType << !Designator.Entries.empty() << InvalidBaseKind
+ << Ident;
+ return false;
+ }
+ }
+
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD)) {
+ if (FD->isConsteval()) {
+ Info.FFDiag(Loc, diag::note_consteval_address_accessible)
+ << !Type->isAnyPointerType();
+ Info.Note(FD->getLocation(), diag::note_declared_at);
+ return false;
}
}
@@ -2055,7 +2168,20 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
Info.FFDiag(Loc, diag::note_constexpr_non_global, 1)
<< IsReferenceType << !Designator.Entries.empty()
<< !!VD << VD;
- NoteLValueLocation(Info, Base);
+
+ auto *VarD = dyn_cast_or_null<VarDecl>(VD);
+ if (VarD && VarD->isConstexpr()) {
+ // Non-static local constexpr variables have unintuitive semantics:
+ // constexpr int a = 1;
+ // constexpr const int *p = &a;
+ // ... is invalid because the address of 'a' is not constant. Suggest
+ // adding a 'static' in this case.
+ Info.Note(VarD->getLocation(), diag::note_constexpr_not_static)
+ << VarD
+ << FixItHint::CreateInsertion(VarD->getBeginLoc(), "static ");
+ } else {
+ NoteLValueLocation(Info, Base);
+ }
} else {
Info.FFDiag(Loc);
}
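The new note carries a fix-it; the before/after in source form (illustrative):

void before() {
  constexpr int a = 1;
  (void)a;
  // constexpr const int *p = &a; // error: '&a' is not a constant
  //                              // expression; the note suggests 'static'
}
void after() {
  static constexpr int a = 1;     // fix-it inserts 'static'
  constexpr const int *p = &a;    // now a valid constant expression
  (void)p;
}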
@@ -2073,19 +2199,20 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
return false;
}
- if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
- if (const VarDecl *Var = dyn_cast<const VarDecl>(VD)) {
+ if (BaseVD) {
+ if (const VarDecl *Var = dyn_cast<const VarDecl>(BaseVD)) {
// Check if this is a thread-local variable.
if (Var->getTLSKind())
// FIXME: Diagnostic!
return false;
- // A dllimport variable never acts like a constant.
- if (Usage == Expr::EvaluateForCodeGen && Var->hasAttr<DLLImportAttr>())
+ // A dllimport variable never acts like a constant, unless we're
+ // evaluating a value for use only in name mangling.
+ if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>())
// FIXME: Diagnostic!
return false;
}
- if (const auto *FD = dyn_cast<const FunctionDecl>(VD)) {
+ if (const auto *FD = dyn_cast<const FunctionDecl>(BaseVD)) {
// __declspec(dllimport) must be handled very carefully:
// We must never initialize an expression with the thunk in C++.
// Doing otherwise would allow the same id-expression to yield
@@ -2096,18 +2223,18 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
// The C language has no notion of ODR; furthermore, it has no notion of
// dynamic initialization. This means that we are permitted to
// perform initialization with the address of the thunk.
- if (Info.getLangOpts().CPlusPlus && Usage == Expr::EvaluateForCodeGen &&
+ if (Info.getLangOpts().CPlusPlus && !isForManglingOnly(Kind) &&
FD->hasAttr<DLLImportAttr>())
// FIXME: Diagnostic!
return false;
}
- } else if (const auto *MTE = dyn_cast_or_null<MaterializeTemporaryExpr>(
- Base.dyn_cast<const Expr *>())) {
+ } else if (const auto *MTE =
+ dyn_cast_or_null<MaterializeTemporaryExpr>(BaseE)) {
if (CheckedTemps.insert(MTE).second) {
QualType TempType = getType(Base);
if (TempType.isDestructedType()) {
Info.FFDiag(MTE->getExprLoc(),
- diag::note_constexpr_unsupported_tempoarary_nontrivial_dtor)
+ diag::note_constexpr_unsupported_temporary_nontrivial_dtor)
<< TempType;
return false;
}
@@ -2116,7 +2243,7 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
assert(V && "evasluation result refers to uninitialised temporary");
if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
Info, MTE->getExprLoc(), TempType, *V,
- Usage, SourceLocation(), CheckedTemps))
+ Kind, SourceLocation(), CheckedTemps))
return false;
}
}
@@ -2135,9 +2262,8 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
// Does this refer one past the end of some object?
if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
- const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
Info.FFDiag(Loc, diag::note_constexpr_past_end, 1)
- << !Designator.Entries.empty() << !!VD << VD;
+ << !Designator.Entries.empty() << !!BaseVD << BaseVD;
NoteLValueLocation(Info, Base);
}
@@ -2150,7 +2276,7 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
SourceLocation Loc,
QualType Type,
const APValue &Value,
- Expr::ConstExprUsage Usage) {
+ ConstantExprKind Kind) {
const ValueDecl *Member = Value.getMemberPointerDecl();
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
if (!FD)
@@ -2160,7 +2286,7 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
Info.Note(FD->getLocation(), diag::note_declared_at);
return false;
}
- return Usage == Expr::EvaluateForMangling || FD->isVirtual() ||
+ return isForManglingOnly(Kind) || FD->isVirtual() ||
!FD->hasAttr<DLLImportAttr>();
}
@@ -2199,7 +2325,7 @@ static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
- Expr::ConstExprUsage Usage,
+ ConstantExprKind Kind,
SourceLocation SubobjectLoc,
CheckedTemporaries &CheckedTemps) {
if (!Value.hasValue()) {
@@ -2222,20 +2348,20 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
- Value.getArrayInitializedElt(I), Usage,
+ Value.getArrayInitializedElt(I), Kind,
SubobjectLoc, CheckedTemps))
return false;
}
if (!Value.hasArrayFiller())
return true;
return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
- Value.getArrayFiller(), Usage, SubobjectLoc,
+ Value.getArrayFiller(), Kind, SubobjectLoc,
CheckedTemps);
}
if (Value.isUnion() && Value.getUnionField()) {
return CheckEvaluationResult(
CERK, Info, DiagLoc, Value.getUnionField()->getType(),
- Value.getUnionValue(), Usage, Value.getUnionField()->getLocation(),
+ Value.getUnionValue(), Kind, Value.getUnionField()->getLocation(),
CheckedTemps);
}
if (Value.isStruct()) {
@@ -2244,7 +2370,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &BS : CD->bases()) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(),
- Value.getStructBase(BaseIndex), Usage,
+ Value.getStructBase(BaseIndex), Kind,
BS.getBeginLoc(), CheckedTemps))
return false;
++BaseIndex;
@@ -2256,7 +2382,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(),
Value.getStructField(I->getFieldIndex()),
- Usage, I->getLocation(), CheckedTemps))
+ Kind, I->getLocation(), CheckedTemps))
return false;
}
}
@@ -2265,13 +2391,13 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
CERK == CheckEvaluationResultKind::ConstantExpression) {
LValue LVal;
LVal.setFrom(Info.Ctx, Value);
- return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal, Usage,
+ return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal, Kind,
CheckedTemps);
}
if (Value.isMemberPointer() &&
CERK == CheckEvaluationResultKind::ConstantExpression)
- return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Usage);
+ return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Kind);
// Everything else is fine.
return true;
@@ -2280,13 +2406,16 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
/// Check that this core constant expression value is a valid value for a
/// constant expression. If not, report an appropriate diagnostic. Does not
/// check that the expression is of literal type.
-static bool
-CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type,
- const APValue &Value,
- Expr::ConstExprUsage Usage = Expr::EvaluateForCodeGen) {
+static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
+ QualType Type, const APValue &Value,
+ ConstantExprKind Kind) {
+ // Nothing to check for a constant expression of type 'cv void'.
+ if (Type->isVoidType())
+ return true;
+
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
- Info, DiagLoc, Type, Value, Usage,
+ Info, DiagLoc, Type, Value, Kind,
SourceLocation(), CheckedTemps);
}
@@ -2297,7 +2426,7 @@ static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc,
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(
CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
- Expr::EvaluateForCodeGen, SourceLocation(), CheckedTemps);
+ ConstantExprKind::Normal, SourceLocation(), CheckedTemps);
}
/// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless
@@ -2369,6 +2498,7 @@ static bool HandleConversionToBool(const APValue &Val, bool &Result) {
static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
APValue Val;
if (!Evaluate(Val, Info, E))
@@ -2399,14 +2529,74 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
return true;
}
+/// Get the rounding mode to use for evaluation of the specified expression.
+/// \param[out] DynamicRM Is set to true if the requested rounding mode is
+/// dynamic.
+/// If the rounding mode is unknown at compile time, we still try to evaluate
+/// the expression: if the result is exact, it does not depend on the rounding
+/// mode, so we return "tonearest" instead of "dynamic".
+static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E,
+ bool &DynamicRM) {
+ llvm::RoundingMode RM =
+ E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).getRoundingMode();
+ DynamicRM = (RM == llvm::RoundingMode::Dynamic);
+ if (DynamicRM)
+ RM = llvm::RoundingMode::NearestTiesToEven;
+ return RM;
+}
+
+/// Check if the given evaluation result is allowed for constant evaluation.
+static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
+ APFloat::opStatus St) {
+ // In a constant context, assume that any dynamic rounding mode or FP
+ // exception state matches the default floating-point environment.
+ if (Info.InConstantContext)
+ return true;
+
+ FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ if ((St & APFloat::opInexact) &&
+ FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) {
+    // An inexact result means that the value depends on the rounding mode. If
+    // the requested mode is dynamic, the evaluation cannot be performed at
+    // compile time.
+ Info.FFDiag(E, diag::note_constexpr_dynamic_rounding);
+ return false;
+ }
+
+ if ((St != APFloat::opOK) &&
+ (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
+ FPO.getFPExceptionMode() != LangOptions::FPE_Ignore ||
+ FPO.getAllowFEnvAccess())) {
+ Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
+ return false;
+ }
+
+ if ((St & APFloat::opStatus::opInvalidOp) &&
+ FPO.getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ // There is no usefully definable result.
+ Info.FFDiag(E);
+ return false;
+ }
+
+ // FIXME: if:
+ // - evaluation triggered other FP exception, and
+ // - exception mode is not "ignore", and
+ // - the expression being evaluated is not a part of global variable
+ // initializer,
+  // the evaluation probably needs to be rejected.
+ return true;
+}
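
To illustrate the intended behavior, here is a sketch in hypothetical user
code; it assumes a build where '#pragma STDC FENV_ROUND' is honored, which is
not itself part of this patch:

    #pragma STDC FENV_ROUND FE_DYNAMIC
    constexpr double a = 1.0 / 3.0; // constant context: the dynamic rounding
                                    // mode is assumed to match the default
                                    // environment, so this still evaluates
    void f() {
      double d = 1.0 / 3.0; // not folded: the result is inexact, so its value
                            // would depend on the run-time rounding mode
    }
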
+
static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, QualType DestType,
APFloat &Result) {
+ assert(isa<CastExpr>(E) || isa<CompoundAssignOperator>(E));
+ bool DynamicRM;
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM);
+ APFloat::opStatus St;
APFloat Value = Result;
bool ignored;
- Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
- APFloat::rmNearestTiesToEven, &ignored);
- return true;
+ St = Result.convert(Info.Ctx.getFloatTypeSemantics(DestType), RM, &ignored);
+ return checkFloatingPointResult(Info, E, St);
}
static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
@@ -2423,11 +2613,17 @@ static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
}
static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
+ const FPOptions FPO,
QualType SrcType, const APSInt &Value,
QualType DestType, APFloat &Result) {
Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
- Result.convertFromAPInt(Value, Value.isSigned(),
- APFloat::rmNearestTiesToEven);
+ APFloat::opStatus St = Result.convertFromAPInt(Value, Value.isSigned(),
+ APFloat::rmNearestTiesToEven);
+ if (!Info.InConstantContext && St != llvm::APFloatBase::opOK &&
+ FPO.isFPConstrained()) {
+ Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
+ return false;
+ }
return true;
}
@@ -2628,28 +2824,31 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
}
/// Perform the given binary floating-point operation, in-place, on LHS.
-static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
+static bool handleFloatFloatBinOp(EvalInfo &Info, const BinaryOperator *E,
APFloat &LHS, BinaryOperatorKind Opcode,
const APFloat &RHS) {
+ bool DynamicRM;
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM);
+ APFloat::opStatus St;
switch (Opcode) {
default:
Info.FFDiag(E);
return false;
case BO_Mul:
- LHS.multiply(RHS, APFloat::rmNearestTiesToEven);
+ St = LHS.multiply(RHS, RM);
break;
case BO_Add:
- LHS.add(RHS, APFloat::rmNearestTiesToEven);
+ St = LHS.add(RHS, RM);
break;
case BO_Sub:
- LHS.subtract(RHS, APFloat::rmNearestTiesToEven);
+ St = LHS.subtract(RHS, RM);
break;
case BO_Div:
// [expr.mul]p4:
// If the second operand of / or % is zero the behavior is undefined.
if (RHS.isZero())
Info.CCEDiag(E, diag::note_expr_divide_by_zero);
- LHS.divide(RHS, APFloat::rmNearestTiesToEven);
+ St = LHS.divide(RHS, RM);
break;
}
@@ -2661,7 +2860,8 @@ static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN();
return Info.noteUndefinedBehavior();
}
- return true;
+
+ return checkFloatingPointResult(Info, E, St);
}
static bool handleLogicalOpForVector(const APInt &LHSValue,
@@ -2744,7 +2944,7 @@ static bool handleCompareOpForVector(const APValue &LHSValue,
}
// Perform binary operations for vector types, in place on the LHS.
-static bool handleVectorVectorBinOp(EvalInfo &Info, const Expr *E,
+static bool handleVectorVectorBinOp(EvalInfo &Info, const BinaryOperator *E,
BinaryOperatorKind Opcode,
APValue &LHSValue,
const APValue &RHSValue) {
@@ -3010,33 +3210,22 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
/// \param Info Information about the ongoing evaluation.
/// \param E An expression to be used when printing diagnostics.
/// \param VD The variable whose initializer should be obtained.
+/// \param Version The version of the variable within the frame.
/// \param Frame The frame in which the variable was created. Must be null
/// if this variable is not local to the evaluation.
/// \param Result Filled in with a pointer to the value of the variable.
static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
const VarDecl *VD, CallStackFrame *Frame,
- APValue *&Result, const LValue *LVal) {
-
- // If this is a parameter to an active constexpr function call, perform
- // argument substitution.
- if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
- // Assume arguments of a potential constant expression are unknown
- // constant expressions.
- if (Info.checkingPotentialConstantExpression())
- return false;
- if (!Frame || !Frame->Arguments) {
- Info.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << VD;
- return false;
- }
- Result = &Frame->Arguments[PVD->getFunctionScopeIndex()];
- return true;
- }
+ unsigned Version, APValue *&Result) {
+ APValue::LValueBase Base(VD, Frame ? Frame->Index : 0, Version);
// If this is a local variable, dig out its value.
if (Frame) {
- Result = LVal ? Frame->getTemporary(VD, LVal->getLValueVersion())
- : Frame->getCurrentTemporary(VD);
- if (!Result) {
+ Result = Frame->getTemporary(VD, Version);
+ if (Result)
+ return true;
+
+ if (!isa<ParmVarDecl>(VD)) {
// Assume variables referenced within a lambda's call operator that were
// not declared within the call operator are captures and during checking
// of a potential constant expression, assume they are unknown constant
@@ -3046,15 +3235,39 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
"missing value for local variable");
if (Info.checkingPotentialConstantExpression())
return false;
- // FIXME: implement capture evaluation during constant expr evaluation.
+ // FIXME: This diagnostic is bogus; we do support captures. Is this code
+ // still reachable at all?
Info.FFDiag(E->getBeginLoc(),
diag::note_unimplemented_constexpr_lambda_feature_ast)
<< "captures not currently allowed";
return false;
}
+ }
+
+ // If we're currently evaluating the initializer of this declaration, use that
+ // in-flight value.
+ if (Info.EvaluatingDecl == Base) {
+ Result = Info.EvaluatingDeclValue;
return true;
}
+ if (isa<ParmVarDecl>(VD)) {
+ // Assume parameters of a potential constant expression are usable in
+ // constant expressions.
+ if (!Info.checkingPotentialConstantExpression() ||
+ !Info.CurrentCall->Callee ||
+ !Info.CurrentCall->Callee->Equals(VD->getDeclContext())) {
+ if (Info.getLangOpts().CPlusPlus11) {
+ Info.FFDiag(E, diag::note_constexpr_function_param_value_unknown)
+ << VD;
+ NoteLValueLocation(Info, Base);
+ } else {
+ Info.FFDiag(E);
+ }
+ }
+ return false;
+ }
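
A sketch of the two situations this branch distinguishes, in hypothetical user
code:

    constexpr int twice(int n) { return 2 * n; } // while checking 'twice' as
                                                 // a potential constant
                                                 // expression, 'n' is assumed
                                                 // to be usable
    void g(int n) {
      constexpr int k = n; // error: the value of the parameter 'n' is
                           // unknown, so the diagnostic above is emitted
    }
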
+
// Dig out the initializer, and use the declaration which it's attached to.
// FIXME: We should eventually check whether the variable has a reachable
// initializing declaration.
@@ -3065,7 +3278,7 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
if (!Info.checkingPotentialConstantExpression()) {
Info.FFDiag(E, diag::note_constexpr_var_init_unknown, 1)
<< VD;
- Info.Note(VD->getLocation(), diag::note_declared_at);
+ NoteLValueLocation(Info, Base);
}
return false;
}
@@ -3082,42 +3295,41 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
? diag::note_constexpr_ltor_non_constexpr
: diag::note_constexpr_ltor_non_integral, 1)
<< VD << VD->getType();
- Info.Note(VD->getLocation(), diag::note_declared_at);
+ NoteLValueLocation(Info, Base);
}
return false;
}
- // If we're currently evaluating the initializer of this declaration, use that
- // in-flight value.
- if (Info.EvaluatingDecl.dyn_cast<const ValueDecl*>() == VD) {
- Result = Info.EvaluatingDeclValue;
- return true;
- }
-
// Check that we can fold the initializer. In C++, we will have already done
// this in the cases where it matters for conformance.
- SmallVector<PartialDiagnosticAt, 8> Notes;
- if (!VD->evaluateValue(Notes)) {
- Info.FFDiag(E, diag::note_constexpr_var_init_non_constant,
- Notes.size() + 1) << VD;
- Info.Note(VD->getLocation(), diag::note_declared_at);
- Info.addNotes(Notes);
+ if (!VD->evaluateValue()) {
+ Info.FFDiag(E, diag::note_constexpr_var_init_non_constant, 1) << VD;
+ NoteLValueLocation(Info, Base);
return false;
}
- // Check that the variable is actually usable in constant expressions.
- if (!VD->checkInitIsICE()) {
- Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
- Notes.size() + 1) << VD;
- Info.Note(VD->getLocation(), diag::note_declared_at);
- Info.addNotes(Notes);
+  // Check that the variable is actually usable in constant expressions. For a
+  // const integral variable or a reference, we might have a non-constant
+  // initializer that we can nonetheless evaluate. Such variables are not
+  // usable in constant expressions. In C++98, the initializer also needs to
+  // be syntactically an ICE.
+ //
+ // FIXME: We don't diagnose cases that aren't potentially usable in constant
+ // expressions here; doing so would regress diagnostics for things like
+ // reading from a volatile constexpr variable.
+ if ((Info.getLangOpts().CPlusPlus && !VD->hasConstantInitialization() &&
+ VD->mightBeUsableInConstantExpressions(Info.Ctx)) ||
+ ((Info.getLangOpts().CPlusPlus || Info.getLangOpts().OpenCL) &&
+ !Info.getLangOpts().CPlusPlus11 && !VD->hasICEInitializer(Info.Ctx))) {
+ Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant, 1) << VD;
+ NoteLValueLocation(Info, Base);
}
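
For example, a sketch of an initializer that is evaluable but whose variable
is still not usable in constant expressions:

    int n = 0;
    const int &r = n;  // the initializer evaluates (it binds 'r' to 'n'), but
                       // 'r' is not usable in constant expressions, so uses
                       // of it where a constant is required get the note above
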
// Never use the initializer of a weak variable, not even for constant
// folding. We can't be sure that this is the definition that will be used.
if (VD->isWeak()) {
Info.FFDiag(E, diag::note_constexpr_var_init_weak) << VD;
- Info.Note(VD->getLocation(), diag::note_declared_at);
+ NoteLValueLocation(Info, Base);
return false;
}
@@ -3125,11 +3337,6 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
return true;
}
-static bool IsConstNonVolatile(QualType T) {
- Qualifiers Quals = T.getQualifiers();
- return Quals.hasConst() && !Quals.hasVolatile();
-}
-
/// Get the base index of the given base class within an APValue representing
/// the given derived class.
static unsigned getBaseIndex(const CXXRecordDecl *Derived,
@@ -3294,26 +3501,20 @@ static bool lifetimeStartedInEvaluation(EvalInfo &Info,
if (Base.getCallIndex())
return true;
- auto *Evaluating = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>();
- if (!Evaluating)
- return false;
-
- auto *BaseD = Base.dyn_cast<const ValueDecl*>();
-
switch (Info.IsEvaluatingDecl) {
case EvalInfo::EvaluatingDeclKind::None:
return false;
case EvalInfo::EvaluatingDeclKind::Ctor:
// The variable whose initializer we're evaluating.
- if (BaseD)
- return declaresSameEntity(Evaluating, BaseD);
+ if (Info.EvaluatingDecl == Base)
+ return true;
// A temporary lifetime-extended by the variable whose initializer we're
// evaluating.
if (auto *BaseE = Base.dyn_cast<const Expr *>())
if (auto *BaseMTE = dyn_cast<MaterializeTemporaryExpr>(BaseE))
- return declaresSameEntity(BaseMTE->getExtendingDecl(), Evaluating);
+ return Info.EvaluatingDecl == BaseMTE->getExtendingDecl();
return false;
case EvalInfo::EvaluatingDeclKind::Dtor:
@@ -3321,16 +3522,13 @@ static bool lifetimeStartedInEvaluation(EvalInfo &Info,
// [during constant destruction] the lifetime of a and its non-mutable
// subobjects (but not its mutable subobjects) [are] considered to start
// within e.
- //
+ if (MutableSubobject || Base != Info.EvaluatingDecl)
+ return false;
// FIXME: We can meaningfully extend this to cover non-const objects, but
// we will need special handling: we should be able to access only
// subobjects of such objects that are themselves declared const.
- if (!BaseD ||
- !(BaseD->getType().isConstQualified() ||
- BaseD->getType()->isReferenceType()) ||
- MutableSubobject)
- return false;
- return declaresSameEntity(Evaluating, BaseD);
+ QualType T = getType(Base);
+ return T.isConstQualified() || T->isReferenceType();
}
llvm_unreachable("unknown evaluating decl kind");
@@ -3782,12 +3980,10 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
APValue *BaseVal = nullptr;
QualType BaseType = getType(LVal.Base);
- if (const ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(LVal.Base.dyn_cast<const Expr *>())) {
- /// Nested immediate invocation have been previously removed so if we found
- /// a ConstantExpr it can only be the EvaluatingDecl.
- assert(CE->isImmediateInvocation() && CE == Info.EvaluatingDecl);
- (void)CE;
+ if (Info.getLangOpts().CPlusPlus14 && LVal.Base == Info.EvaluatingDecl &&
+ lifetimeStartedInEvaluation(Info, LVal.Base)) {
+ // This is the object whose initializer we're evaluating, so its lifetime
+ // started in the current evaluation.
BaseVal = Info.EvaluatingDeclValue;
} else if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl *>()) {
// Allow reading from a GUID declaration.
@@ -3806,6 +4002,16 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject(LVal.Base, &V, GD->getType());
}
+ // Allow reading from template parameter objects.
+ if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) {
+ if (isModification(AK)) {
+ Info.FFDiag(E, diag::note_constexpr_modify_global);
+ return CompleteObject();
+ }
+ return CompleteObject(LVal.Base, const_cast<APValue *>(&TPO->getValue()),
+ TPO->getType());
+ }
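
A sketch of what this enables, assuming C++20 class-type non-type template
parameters:

    struct S { int x; };
    template <S V> constexpr int get() { return V.x; } // reads the template
                                                       // parameter object
    static_assert(get<S{42}>() == 42, "");
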
+
// In C++98, const, non-volatile integers initialized with ICEs are ICEs.
// In C++11, constexpr, non-volatile variables initialized with constant
// expressions are constant expressions too. Inside constexpr functions,
@@ -3823,16 +4029,17 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject();
}
- // In OpenCL if a variable is in constant address space it is a const value.
- bool IsConstant = BaseType.isConstQualified() ||
- (Info.getLangOpts().OpenCL &&
- BaseType.getAddressSpace() == LangAS::opencl_constant);
+ bool IsConstant = BaseType.isConstant(Info.Ctx);
// Unless we're looking at a local variable or argument in a constexpr call,
// the variable we're reading must be const.
if (!Frame) {
- if (Info.getLangOpts().CPlusPlus14 &&
- lifetimeStartedInEvaluation(Info, LVal.Base)) {
+ if (IsAccess && isa<ParmVarDecl>(VD)) {
+ // Access of a parameter that's not associated with a frame isn't going
+ // to work out, but we can leave it to evaluateVarDeclInit to provide a
+ // suitable diagnostic.
+ } else if (Info.getLangOpts().CPlusPlus14 &&
+ lifetimeStartedInEvaluation(Info, LVal.Base)) {
// OK, we can read and modify an object if we're in the process of
// evaluating its initializer, because its lifetime began in this
// evaluation.
@@ -3843,8 +4050,6 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
} else if (VD->isConstexpr()) {
// OK, we can read this variable.
} else if (BaseType->isIntegralOrEnumerationType()) {
- // In OpenCL if a variable is in constant address space it is a const
- // value.
if (!IsConstant) {
if (!IsAccess)
return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
@@ -3889,7 +4094,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
}
- if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal, &LVal))
+ if (!evaluateVarDeclInit(Info, E, VD, Frame, LVal.getLValueVersion(), BaseVal))
return CompleteObject();
} else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) {
Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA);
@@ -3908,27 +4113,32 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
assert(MTE->getStorageDuration() == SD_Static &&
"should have a frame for a non-global materialized temporary");
- // Per C++1y [expr.const]p2:
+ // C++20 [expr.const]p4: [DR2126]
+ // An object or reference is usable in constant expressions if it is
+ // - a temporary object of non-volatile const-qualified literal type
+ // whose lifetime is extended to that of a variable that is usable
+ // in constant expressions
+ //
+ // C++20 [expr.const]p5:
// an lvalue-to-rvalue conversion [is not allowed unless it applies to]
- // - a [...] glvalue of integral or enumeration type that refers to
- // a non-volatile const object [...]
- // [...]
- // - a [...] glvalue of literal type that refers to a non-volatile
- // object whose lifetime began within the evaluation of e.
+ // - a non-volatile glvalue that refers to an object that is usable
+ // in constant expressions, or
+ // - a non-volatile glvalue of literal type that refers to a
+ // non-volatile object whose lifetime began within the evaluation
+ // of E;
//
// C++11 misses the 'began within the evaluation of e' check and
// instead allows all temporaries, including things like:
// int &&r = 1;
// int x = ++r;
// constexpr int k = r;
- // Therefore we use the C++14 rules in C++11 too.
+ // Therefore we use the C++14-onwards rules in C++11 too.
//
// Note that temporaries whose lifetimes began while evaluating a
// variable's constructor are not usable while evaluating the
// corresponding destructor, not even if they're of const-qualified
// types.
- if (!(BaseType.isConstQualified() &&
- BaseType->isIntegralOrEnumerationType()) &&
+ if (!MTE->isUsableInConstantExpressions(Info.Ctx) &&
!lifetimeStartedInEvaluation(Info, LVal.Base)) {
if (!IsAccess)
return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
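
Both sides of the rule, sketched in hypothetical user code:

    constexpr const int &r = 42; // the temporary is lifetime-extended by a
                                 // variable usable in constant expressions
    static_assert(r == 42, ""); // OK under the DR2126 wording
    int &&t = 1;                // extended by a non-constexpr variable
    // constexpr int k = t;     // rejected under the C++14-onwards rule
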
@@ -3958,13 +4168,19 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
// In C++14, we can't safely access any mutable state when we might be
- // evaluating after an unmodeled side effect.
+ // evaluating after an unmodeled side effect. Parameters are modeled as state
+ // in the caller, but aren't visible once the call returns, so they can be
+ // modified in a speculatively-evaluated call.
//
// FIXME: Not all local state is mutable. Allow local constant subobjects
// to be read here (but take care with 'mutable' fields).
+ unsigned VisibleDepth = Depth;
+ if (llvm::isa_and_nonnull<ParmVarDecl>(
+ LVal.Base.dyn_cast<const ValueDecl *>()))
+ ++VisibleDepth;
if ((Frame && Info.getLangOpts().CPlusPlus14 &&
Info.EvalStatus.HasSideEffects) ||
- (isModification(AK) && Depth < Info.SpeculativeEvaluationDepth))
+ (isModification(AK) && VisibleDepth < Info.SpeculativeEvaluationDepth))
return CompleteObject();
return CompleteObject(LVal.getLValueBase(), BaseVal, BaseType);
@@ -4058,7 +4274,7 @@ static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
namespace {
struct CompoundAssignSubobjectHandler {
EvalInfo &Info;
- const Expr *E;
+ const CompoundAssignOperator *E;
QualType PromotedLHSType;
BinaryOperatorKind Opcode;
const APValue &RHS;
@@ -4129,9 +4345,11 @@ struct CompoundAssignSubobjectHandler {
Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS);
return true;
} else if (RHS.isFloat()) {
+ const FPOptions FPO = E->getFPFeaturesInEffect(
+ Info.Ctx.getLangOpts());
APFloat FValue(0.0);
- return HandleIntToFloatCast(Info, E, SubobjType, Value, PromotedLHSType,
- FValue) &&
+ return HandleIntToFloatCast(Info, E, FPO, SubobjType, Value,
+ PromotedLHSType, FValue) &&
handleFloatFloatBinOp(Info, E, FValue, Opcode, RHS.getFloat()) &&
HandleFloatToIntCast(Info, E, PromotedLHSType, FValue, SubobjType,
Value);
@@ -4178,10 +4396,12 @@ struct CompoundAssignSubobjectHandler {
const AccessKinds CompoundAssignSubobjectHandler::AccessKind;
/// Perform a compound assignment of LVal <op>= RVal.
-static bool handleCompoundAssignment(
- EvalInfo &Info, const Expr *E,
- const LValue &LVal, QualType LValType, QualType PromotedLValType,
- BinaryOperatorKind Opcode, const APValue &RVal) {
+static bool handleCompoundAssignment(EvalInfo &Info,
+ const CompoundAssignOperator *E,
+ const LValue &LVal, QualType LValType,
+ QualType PromotedLValType,
+ BinaryOperatorKind Opcode,
+ const APValue &RVal) {
if (LVal.Designator.Invalid)
return false;
@@ -4573,13 +4793,15 @@ static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
return true;
LValue Result;
- APValue &Val =
- Info.CurrentCall->createTemporary(VD, VD->getType(), true, Result);
+ APValue &Val = Info.CurrentCall->createTemporary(VD, VD->getType(),
+ ScopeKind::Block, Result);
const Expr *InitE = VD->getInit();
- if (!InitE)
+ if (!InitE) {
+ if (VD->getType()->isDependentType())
+ return Info.noteSideEffect();
return getDefaultInitValue(VD->getType(), Val);
-
+ }
if (InitE->isValueDependent())
return false;
@@ -4607,10 +4829,20 @@ static bool EvaluateDecl(EvalInfo &Info, const Decl *D) {
return OK;
}
+static bool EvaluateDependentExpr(const Expr *E, EvalInfo &Info) {
+ assert(E->isValueDependent());
+ if (Info.noteSideEffect())
+ return true;
+ assert(E->containsErrors() && "valid value-dependent expression should never "
+ "reach invalid code path.");
+ return false;
+}
/// Evaluate a condition (either a variable declaration or an expression).
static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
const Expr *Cond, bool &Result) {
+ if (Cond->isValueDependent())
+ return false;
FullExpressionRAII Scope(Info);
if (CondDecl && !EvaluateDecl(Info, CondDecl))
return false;
@@ -4835,10 +5067,15 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
EvaluateLoopBody(Result, Info, FS->getBody(), Case);
if (ESR != ESR_Continue)
return ESR;
- if (FS->getInc()) {
- FullExpressionRAII IncScope(Info);
- if (!EvaluateIgnoredValue(Info, FS->getInc()) || !IncScope.destroy())
- return ESR_Failed;
+ if (const auto *Inc = FS->getInc()) {
+ if (Inc->isValueDependent()) {
+ if (!EvaluateDependentExpr(Inc, Info))
+ return ESR_Failed;
+ } else {
+ FullExpressionRAII IncScope(Info);
+ if (!EvaluateIgnoredValue(Info, Inc) || !IncScope.destroy())
+ return ESR_Failed;
+ }
}
break;
}
@@ -4868,13 +5105,18 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
switch (S->getStmtClass()) {
default:
if (const Expr *E = dyn_cast<Expr>(S)) {
- // Don't bother evaluating beyond an expression-statement which couldn't
- // be evaluated.
- // FIXME: Do we need the FullExpressionRAII object here?
- // VisitExprWithCleanups should create one when necessary.
- FullExpressionRAII Scope(Info);
- if (!EvaluateIgnoredValue(Info, E) || !Scope.destroy())
- return ESR_Failed;
+ if (E->isValueDependent()) {
+ if (!EvaluateDependentExpr(E, Info))
+ return ESR_Failed;
+ } else {
+ // Don't bother evaluating beyond an expression-statement which couldn't
+ // be evaluated.
+ // FIXME: Do we need the FullExpressionRAII object here?
+ // VisitExprWithCleanups should create one when necessary.
+ FullExpressionRAII Scope(Info);
+ if (!EvaluateIgnoredValue(Info, E) || !Scope.destroy())
+ return ESR_Failed;
+ }
return ESR_Succeeded;
}
@@ -4900,6 +5142,11 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
case Stmt::ReturnStmtClass: {
const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
FullExpressionRAII Scope(Info);
+ if (RetExpr && RetExpr->isValueDependent()) {
+ EvaluateDependentExpr(RetExpr, Info);
+ // We know we returned, but we don't know what the value is.
+ return ESR_Failed;
+ }
if (RetExpr &&
!(Result.Slot
? EvaluateInPlace(Result.Value, Info, *Result.Slot, RetExpr)
@@ -4987,6 +5234,11 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
return ESR;
Case = nullptr;
+ if (DS->getCond()->isValueDependent()) {
+ EvaluateDependentExpr(DS->getCond(), Info);
+    // Bail out: we don't know whether to keep going or terminate the loop.
+ return ESR_Failed;
+ }
FullExpressionRAII CondScope(Info);
if (!EvaluateAsBooleanCondition(DS->getCond(), Continue, Info) ||
!CondScope.destroy())
@@ -5022,10 +5274,15 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
return ESR;
}
- if (FS->getInc()) {
- FullExpressionRAII IncScope(Info);
- if (!EvaluateIgnoredValue(Info, FS->getInc()) || !IncScope.destroy())
- return ESR_Failed;
+ if (const auto *Inc = FS->getInc()) {
+ if (Inc->isValueDependent()) {
+ if (!EvaluateDependentExpr(Inc, Info))
+ return ESR_Failed;
+ } else {
+ FullExpressionRAII IncScope(Info);
+ if (!EvaluateIgnoredValue(Info, Inc) || !IncScope.destroy())
+ return ESR_Failed;
+ }
}
if (!IterScope.destroy())
@@ -5073,6 +5330,11 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
while (true) {
// Condition: __begin != __end.
{
+ if (FS->getCond()->isValueDependent()) {
+ EvaluateDependentExpr(FS->getCond(), Info);
+ // We don't know whether to keep going or terminate the loop.
+ return ESR_Failed;
+ }
bool Continue = true;
FullExpressionRAII CondExpr(Info);
if (!EvaluateAsBooleanCondition(FS->getCond(), Continue, Info))
@@ -5097,10 +5359,14 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
return ESR_Failed;
return ESR;
}
-
- // Increment: ++__begin
- if (!EvaluateIgnoredValue(Info, FS->getInc()))
- return ESR_Failed;
+ if (FS->getInc()->isValueDependent()) {
+ if (!EvaluateDependentExpr(FS->getInc(), Info))
+ return ESR_Failed;
+ } else {
+ // Increment: ++__begin
+ if (!EvaluateIgnoredValue(Info, FS->getInc()))
+ return ESR_Failed;
+ }
if (!InnerScope.destroy())
return ESR_Failed;
@@ -5195,13 +5461,6 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
return false;
}
- if (const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(Definition)) {
- for (const auto *InitExpr : CtorDecl->inits()) {
- if (InitExpr->getInit() && InitExpr->getInit()->containsErrors())
- return false;
- }
- }
-
// Can we evaluate this function call?
if (Definition && Definition->isConstexpr() && Body)
return true;
@@ -5719,13 +5978,35 @@ static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
return true;
}
-namespace {
-typedef SmallVector<APValue, 8> ArgVector;
+static bool EvaluateCallArg(const ParmVarDecl *PVD, const Expr *Arg,
+ CallRef Call, EvalInfo &Info,
+ bool NonNull = false) {
+ LValue LV;
+ // Create the parameter slot and register its destruction. For a vararg
+ // argument, create a temporary.
+ // FIXME: For calling conventions that destroy parameters in the callee,
+ // should we consider performing destruction when the function returns
+ // instead?
+ APValue &V = PVD ? Info.CurrentCall->createParam(Call, PVD, LV)
+ : Info.CurrentCall->createTemporary(Arg, Arg->getType(),
+ ScopeKind::Call, LV);
+ if (!EvaluateInPlace(V, Info, LV, Arg))
+ return false;
+
+ // Passing a null pointer to an __attribute__((nonnull)) parameter results in
+ // undefined behavior, so is non-constant.
+ if (NonNull && V.isLValue() && V.isNullPointer()) {
+ Info.CCEDiag(Arg, diag::note_non_null_attribute_failed);
+ return false;
+ }
+
+ return true;
}
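
For instance, in hypothetical user code:

    __attribute__((nonnull)) constexpr int tag(int *p) { return 42; }
    constexpr int v = 0;
    static_assert(tag(const_cast<int *>(&v)) == 42, ""); // non-null: fine
    // constexpr int bad = tag(nullptr); // not a constant expression: a null
    //                                   // pointer was passed to a parameter
    //                                   // declared nonnull
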
-/// EvaluateArgs - Evaluate the arguments to a function call.
-static bool EvaluateArgs(ArrayRef<const Expr *> Args, ArgVector &ArgValues,
- EvalInfo &Info, const FunctionDecl *Callee) {
+/// Evaluate the arguments to a function call.
+static bool EvaluateArgs(ArrayRef<const Expr *> Args, CallRef Call,
+ EvalInfo &Info, const FunctionDecl *Callee,
+ bool RightToLeft = false) {
bool Success = true;
llvm::SmallBitVector ForbiddenNullArgs;
if (Callee->hasAttr<NonNullAttr>()) {
@@ -5743,42 +6024,53 @@ static bool EvaluateArgs(ArrayRef<const Expr *> Args, ArgVector &ArgValues,
}
}
}
- // FIXME: This is the wrong evaluation order for an assignment operator
- // called via operator syntax.
- for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
- if (!Evaluate(ArgValues[Idx], Info, Args[Idx])) {
+ for (unsigned I = 0; I < Args.size(); I++) {
+ unsigned Idx = RightToLeft ? Args.size() - I - 1 : I;
+ const ParmVarDecl *PVD =
+ Idx < Callee->getNumParams() ? Callee->getParamDecl(Idx) : nullptr;
+ bool NonNull = !ForbiddenNullArgs.empty() && ForbiddenNullArgs[Idx];
+ if (!EvaluateCallArg(PVD, Args[Idx], Call, Info, NonNull)) {
// If we're checking for a potential constant expression, evaluate all
// initializers even if some of them fail.
if (!Info.noteFailure())
return false;
Success = false;
- } else if (!ForbiddenNullArgs.empty() &&
- ForbiddenNullArgs[Idx] &&
- ArgValues[Idx].isLValue() &&
- ArgValues[Idx].isNullPointer()) {
- Info.CCEDiag(Args[Idx], diag::note_non_null_attribute_failed);
- if (!Info.noteFailure())
- return false;
- Success = false;
}
}
return Success;
}
+/// Perform a trivial copy from Param, which is the parameter of a copy or move
+/// constructor or assignment operator.
+static bool handleTrivialCopy(EvalInfo &Info, const ParmVarDecl *Param,
+ const Expr *E, APValue &Result,
+ bool CopyObjectRepresentation) {
+ // Find the reference argument.
+ CallStackFrame *Frame = Info.CurrentCall;
+ APValue *RefValue = Info.getParamSlot(Frame->Arguments, Param);
+ if (!RefValue) {
+ Info.FFDiag(E);
+ return false;
+ }
+
+ // Copy out the contents of the RHS object.
+ LValue RefLValue;
+ RefLValue.setFrom(Info.Ctx, *RefValue);
+ return handleLValueToRValueConversion(
+ Info, E, Param->getType().getNonReferenceType(), RefLValue, Result,
+ CopyObjectRepresentation);
+}
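
A sketch of why the copy has to be an APValue copy for unions:

    union U { int i; float f; };
    constexpr U u1{42};  // active member: 'i'
    constexpr U u2 = u1; // the defaulted (trivial) copy is performed as an
                         // APValue copy, preserving the active member
    static_assert(u2.i == 42, "");
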
+
/// Evaluate a function call.
static bool HandleFunctionCall(SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
- ArrayRef<const Expr*> Args, const Stmt *Body,
- EvalInfo &Info, APValue &Result,
- const LValue *ResultSlot) {
- ArgVector ArgValues(Args.size());
- if (!EvaluateArgs(Args, ArgValues, Info, Callee))
- return false;
-
+ ArrayRef<const Expr *> Args, CallRef Call,
+ const Stmt *Body, EvalInfo &Info,
+ APValue &Result, const LValue *ResultSlot) {
if (!Info.CheckCallLimit(CallLoc))
return false;
- CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data());
+ CallStackFrame Frame(Info, CallLoc, Callee, This, Call);
// For a trivial copy or move assignment, perform an APValue copy. This is
// essential for unions, where the operations performed by the assignment
@@ -5793,11 +6085,9 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
isReadByLvalueToRvalueConversion(MD->getParent())))) {
assert(This &&
(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
- LValue RHS;
- RHS.setFrom(Info.Ctx, ArgValues[0]);
APValue RHSValue;
- if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), RHS,
- RHSValue, MD->getParent()->isUnion()))
+ if (!handleTrivialCopy(Info, MD->getParamDecl(0), Args[0], RHSValue,
+ MD->getParent()->isUnion()))
return false;
if (Info.getLangOpts().CPlusPlus20 && MD->isTrivial() &&
!HandleUnionActiveMemberChange(Info, Args[0], *This))
@@ -5831,7 +6121,7 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
/// Evaluate a constructor call.
static bool HandleConstructorCall(const Expr *E, const LValue &This,
- APValue *ArgValues,
+ CallRef Call,
const CXXConstructorDecl *Definition,
EvalInfo &Info, APValue &Result) {
SourceLocation CallLoc = E->getExprLoc();
@@ -5848,7 +6138,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
Info,
ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries},
RD->getNumBases());
- CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues);
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, Call);
// FIXME: Creating an APValue just to hold a nonexistent return value is
// wasteful.
@@ -5858,7 +6148,10 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// If it's a delegating constructor, delegate.
if (Definition->isDelegatingConstructor()) {
CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
- {
+ if ((*I)->getInit()->isValueDependent()) {
+ if (!EvaluateDependentExpr((*I)->getInit(), Info))
+ return false;
+ } else {
FullExpressionRAII InitScope(Info);
if (!EvaluateInPlace(Result, Info, This, (*I)->getInit()) ||
!InitScope.destroy())
@@ -5879,11 +6172,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
(Definition->getParent()->isUnion() ||
(Definition->isTrivial() &&
isReadByLvalueToRvalueConversion(Definition->getParent())))) {
- LValue RHS;
- RHS.setFrom(Info.Ctx, ArgValues[0]);
- return handleLValueToRValueConversion(
- Info, E, Definition->getParamDecl(0)->getType().getNonReferenceType(),
- RHS, Result, Definition->getParent()->isUnion());
+ return handleTrivialCopy(Info, Definition->getParamDecl(0), E, Result,
+ Definition->getParent()->isUnion());
}
// Reserve space for the struct members.
@@ -6002,17 +6292,22 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// This refers to innermost anonymous struct/union containing initializer,
// not to currently constructed class.
const Expr *Init = I->getInit();
- ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent,
- isa<CXXDefaultInitExpr>(Init));
- FullExpressionRAII InitScope(Info);
- if (!EvaluateInPlace(*Value, Info, Subobject, Init) ||
- (FD && FD->isBitField() &&
- !truncateBitfieldValue(Info, Init, *Value, FD))) {
- // If we're checking for a potential constant expression, evaluate all
- // initializers even if some of them fail.
- if (!Info.noteFailure())
+ if (Init->isValueDependent()) {
+ if (!EvaluateDependentExpr(Init, Info))
return false;
- Success = false;
+ } else {
+ ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent,
+ isa<CXXDefaultInitExpr>(Init));
+ FullExpressionRAII InitScope(Info);
+ if (!EvaluateInPlace(*Value, Info, Subobject, Init) ||
+ (FD && FD->isBitField() &&
+ !truncateBitfieldValue(Info, Init, *Value, FD))) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
+ }
}
// This is the point at which the dynamic type of the object becomes this
@@ -6042,12 +6337,13 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
ArrayRef<const Expr*> Args,
const CXXConstructorDecl *Definition,
EvalInfo &Info, APValue &Result) {
- ArgVector ArgValues(Args.size());
- if (!EvaluateArgs(Args, ArgValues, Info, Definition))
+ CallScopeRAII CallScope(Info);
+ CallRef Call = Info.CurrentCall->createCall(Definition);
+ if (!EvaluateArgs(Args, Call, Info, Definition))
return false;
- return HandleConstructorCall(E, This, ArgValues.data(), Definition,
- Info, Result);
+ return HandleConstructorCall(E, This, Call, Definition, Info, Result) &&
+ CallScope.destroy();
}
static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
@@ -6143,7 +6439,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
if (!CheckConstexprFunction(Info, CallLoc, DD, Definition, Body))
return false;
- CallStackFrame Frame(Info, CallLoc, Definition, &This, nullptr);
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, CallRef());
// We're now in the period of destruction of this object.
unsigned BasesLeft = RD->getNumBases();
@@ -6608,9 +6904,15 @@ class APValueToBufferConverter {
}
bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
- CharUnits Width = Info.Ctx.getTypeSizeInChars(Ty);
- SmallVector<unsigned char, 8> Bytes(Width.getQuantity());
- llvm::StoreIntToMemory(Val, &*Bytes.begin(), Width.getQuantity());
+ APSInt AdjustedVal = Val;
+ unsigned Width = AdjustedVal.getBitWidth();
+ if (Ty->isBooleanType()) {
+ Width = Info.Ctx.getTypeSize(Ty);
+ AdjustedVal = AdjustedVal.extend(Width);
+ }
+
+ SmallVector<unsigned char, 8> Bytes(Width / 8);
+ llvm::StoreIntToMemory(AdjustedVal, &*Bytes.begin(), Width / 8);
Buffer.writeObject(Offset, Bytes);
return true;
}
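
For example, assuming C++20's bit_cast builtin: a bool's APSInt is one bit
wide, but its object representation is a full byte, so the value is extended
before being stored.

    static_assert(__builtin_bit_cast(unsigned char, true) == 1, "");
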
@@ -6651,6 +6953,13 @@ class BufferToAPValueConverter {
return None;
}
+ llvm::NoneType unrepresentableValue(QualType Ty, const APSInt &Val) {
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_unrepresentable_value)
+ << Ty << Val.toString(/*Radix=*/10);
+ return None;
+ }
+
Optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
const EnumType *EnumSugar = nullptr) {
if (T->isNullPtrType()) {
@@ -6661,6 +6970,20 @@ class BufferToAPValueConverter {
}
CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);
+
+ // Work around floating point types that contain unused padding bytes. This
+ // is really just `long double` on x86, which is the only fundamental type
+ // with padding bytes.
+ if (T->isRealFloatingType()) {
+ const llvm::fltSemantics &Semantics =
+ Info.Ctx.getFloatTypeSemantics(QualType(T, 0));
+ unsigned NumBits = llvm::APFloatBase::getSizeInBits(Semantics);
+ assert(NumBits % 8 == 0);
+ CharUnits NumBytes = CharUnits::fromQuantity(NumBits / 8);
+ if (NumBytes != SizeOf)
+ SizeOf = NumBytes;
+ }
+
SmallVector<uint8_t, 8> Bytes;
if (!Buffer.readObject(Offset, SizeOf, Bytes)) {
      // If this is std::byte or unsigned char, then it's okay to store an
@@ -6685,6 +7008,15 @@ class BufferToAPValueConverter {
if (T->isIntegralOrEnumerationType()) {
Val.setIsSigned(T->isSignedIntegerOrEnumerationType());
+
+ unsigned IntWidth = Info.Ctx.getIntWidth(QualType(T, 0));
+ if (IntWidth != Val.getBitWidth()) {
+ APSInt Truncated = Val.trunc(IntWidth);
+ if (Truncated.extend(Val.getBitWidth()) != Val)
+ return unrepresentableValue(QualType(T, 0), Val);
+ Val = Truncated;
+ }
+
return APValue(Val);
}
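
Sketch of the effect: a bit pattern that is not a value of the destination
type is now rejected rather than silently truncated.

    static_assert(__builtin_bit_cast(bool, (unsigned char)1), "");
    // constexpr bool b = __builtin_bit_cast(bool, (unsigned char)2);
    //   // error: the value 2 is unrepresentable in 'bool'
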
@@ -7095,8 +7427,8 @@ public:
LValue CommonLV;
if (!Evaluate(Info.CurrentCall->createTemporary(
E->getOpaqueValue(),
- getStorageType(Info.Ctx, E->getOpaqueValue()), false,
- CommonLV),
+ getStorageType(Info.Ctx, E->getOpaqueValue()),
+ ScopeKind::FullExpression, CommonLV),
Info, E->getCommon()))
return false;
@@ -7160,7 +7492,8 @@ public:
LValue LV;
if (!Evaluate(Info.CurrentCall->createTemporary(
- OVE, getStorageType(Info.Ctx, OVE), false, LV),
+ OVE, getStorageType(Info.Ctx, OVE),
+ ScopeKind::FullExpression, LV),
Info, OVE->getSourceExpr()))
return false;
} else if (SemE == E->getResultExpr()) {
@@ -7183,6 +7516,8 @@ public:
bool handleCallExpr(const CallExpr *E, APValue &Result,
const LValue *ResultSlot) {
+ CallScopeRAII CallScope(Info);
+
const Expr *Callee = E->getCallee()->IgnoreParens();
QualType CalleeType = Callee->getType();
@@ -7191,6 +7526,8 @@ public:
auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
bool HasQualifier = false;
+ CallRef Call;
+
// Extract function decl and 'this' pointer from the callee.
if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
const CXXMethodDecl *Member = nullptr;
@@ -7222,14 +7559,14 @@ public:
return Error(Callee);
FD = Member;
} else if (CalleeType->isFunctionPointerType()) {
- LValue Call;
- if (!EvaluatePointer(Callee, Call, Info))
+ LValue CalleeLV;
+ if (!EvaluatePointer(Callee, CalleeLV, Info))
return false;
- if (!Call.getLValueOffset().isZero())
+ if (!CalleeLV.getLValueOffset().isZero())
return Error(Callee);
FD = dyn_cast_or_null<FunctionDecl>(
- Call.getLValueBase().dyn_cast<const ValueDecl*>());
+ CalleeLV.getLValueBase().dyn_cast<const ValueDecl *>());
if (!FD)
return Error(Callee);
// Don't call function pointers which have been cast to some other type.
@@ -7239,6 +7576,17 @@ public:
return Error(E);
}
+ // For an (overloaded) assignment expression, evaluate the RHS before the
+ // LHS.
+ auto *OCE = dyn_cast<CXXOperatorCallExpr>(E);
+ if (OCE && OCE->isAssignmentOp()) {
+ assert(Args.size() == 2 && "wrong number of arguments in assignment");
+ Call = Info.CurrentCall->createCall(FD);
+ if (!EvaluateArgs(isa<CXXMethodDecl>(FD) ? Args.slice(1) : Args, Call,
+ Info, FD, /*RightToLeft=*/true))
+ return false;
+ }
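
The right-to-left order is observable when both operands have side effects; a
sketch, assuming -std=c++17 or later:

    struct Cell {
      int v = 0;
      constexpr Cell &operator=(int x) { v = x; return *this; }
    };
    constexpr int h() {
      Cell c[2];
      int i = 0;
      c[i] = ++i; // overloaded '=': the RHS is evaluated first, so this
                  // assigns to c[1]
      return c[1].v;
    }
    static_assert(h() == 1, "");
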
+
// Overloaded operator calls to member functions are represented as normal
// calls with '*this' as the first argument.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
@@ -7293,14 +7641,21 @@ public:
if (!HandleOperatorNewCall(Info, E, Ptr))
return false;
Ptr.moveInto(Result);
- return true;
+ return CallScope.destroy();
} else {
- return HandleOperatorDeleteCall(Info, E);
+ return HandleOperatorDeleteCall(Info, E) && CallScope.destroy();
}
}
} else
return Error(E);
+ // Evaluate the arguments now if we've not already done so.
+ if (!Call) {
+ Call = Info.CurrentCall->createCall(FD);
+ if (!EvaluateArgs(Args, Call, Info, FD))
+ return false;
+ }
+
SmallVector<QualType, 4> CovariantAdjustmentPath;
if (This) {
auto *NamedMember = dyn_cast<CXXMethodDecl>(FD);
@@ -7323,15 +7678,16 @@ public:
if (auto *DD = dyn_cast<CXXDestructorDecl>(FD)) {
assert(This && "no 'this' pointer for destructor call");
return HandleDestruction(Info, E, *This,
- Info.Ctx.getRecordType(DD->getParent()));
+ Info.Ctx.getRecordType(DD->getParent())) &&
+ CallScope.destroy();
}
const FunctionDecl *Definition = nullptr;
Stmt *Body = FD->getBody(Definition);
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body) ||
- !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body, Info,
- Result, ResultSlot))
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Call,
+ Body, Info, Result, ResultSlot))
return false;
if (!CovariantAdjustmentPath.empty() &&
@@ -7339,7 +7695,7 @@ public:
CovariantAdjustmentPath))
return false;
- return true;
+ return CallScope.destroy();
}
bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
@@ -7750,20 +8106,20 @@ public:
/// * @selector() expressions in Objective-C
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK) {
+ assert(!E->isValueDependent());
assert(E->isGLValue() || E->getType()->isFunctionType() ||
E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E));
return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
}
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
- return Success(FD);
- if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ const NamedDecl *D = E->getDecl();
+ if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl>(D))
+ return Success(cast<ValueDecl>(D));
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VisitVarDecl(E, VD);
- if (const BindingDecl *BD = dyn_cast<BindingDecl>(E->getDecl()))
+ if (const BindingDecl *BD = dyn_cast<BindingDecl>(D))
return Visit(BD->getBinding());
- if (const MSGuidDecl *GD = dyn_cast<MSGuidDecl>(E->getDecl()))
- return Success(GD);
return Error(E);
}
@@ -7803,31 +8159,51 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
return true;
}
}
+
CallStackFrame *Frame = nullptr;
- if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1) {
+ unsigned Version = 0;
+ if (VD->hasLocalStorage()) {
// Only if a local variable was declared in the function currently being
// evaluated, do we expect to be able to find its value in the current
// frame. (Otherwise it was likely declared in an enclosing context and
    // could either have a valid evaluatable value (e.g., for a constexpr
// variable) or be ill-formed (and trigger an appropriate evaluation
// diagnostic)).
- if (Info.CurrentCall->Callee &&
- Info.CurrentCall->Callee->Equals(VD->getDeclContext())) {
- Frame = Info.CurrentCall;
+ CallStackFrame *CurrFrame = Info.CurrentCall;
+ if (CurrFrame->Callee && CurrFrame->Callee->Equals(VD->getDeclContext())) {
+ // Function parameters are stored in some caller's frame. (Usually the
+ // immediate caller, but for an inherited constructor they may be more
+ // distant.)
+ if (auto *PVD = dyn_cast<ParmVarDecl>(VD)) {
+ if (CurrFrame->Arguments) {
+ VD = CurrFrame->Arguments.getOrigParam(PVD);
+ Frame =
+ Info.getCallFrameAndDepth(CurrFrame->Arguments.CallIndex).first;
+ Version = CurrFrame->Arguments.Version;
+ }
+ } else {
+ Frame = CurrFrame;
+ Version = CurrFrame->getCurrentTemporaryVersion(VD);
+ }
}
}
if (!VD->getType()->isReferenceType()) {
if (Frame) {
- Result.set({VD, Frame->Index,
- Info.CurrentCall->getCurrentTemporaryVersion(VD)});
+ Result.set({VD, Frame->Index, Version});
return true;
}
return Success(VD);
}
+ if (!Info.getLangOpts().CPlusPlus11) {
+ Info.CCEDiag(E, diag::note_constexpr_ltor_non_integral, 1)
+ << VD << VD->getType();
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ }
+
APValue *V;
- if (!evaluateVarDeclInit(Info, E, VD, Frame, V, nullptr))
+ if (!evaluateVarDeclInit(Info, E, VD, Frame, Version, V))
return false;
if (!V->hasValue()) {
// FIXME: Is it possible for V to be indeterminate here? If so, we should
@@ -7857,12 +8233,16 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
// value for use outside this evaluation.
APValue *Value;
if (E->getStorageDuration() == SD_Static) {
+ // FIXME: What about SD_Thread?
Value = E->getOrCreateValue(true);
*Value = APValue();
Result.set(E);
} else {
Value = &Info.CurrentCall->createTemporary(
- E, E->getType(), E->getStorageDuration() == SD_Automatic, Result);
+ E, E->getType(),
+ E->getStorageDuration() == SD_FullExpression ? ScopeKind::FullExpression
+ : ScopeKind::Block,
+ Result);
}
QualType Type = Inner->getType();
@@ -7969,16 +8349,19 @@ bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
if (E->getBase()->getType()->isVectorType())
return Error(E);
+ APSInt Index;
bool Success = true;
- if (!evaluatePointer(E->getBase(), Result)) {
- if (!Info.noteFailure())
- return false;
- Success = false;
- }
- APSInt Index;
- if (!EvaluateInteger(E->getIdx(), Index, Info))
- return false;
+ // C++17's rules require us to evaluate the LHS first, regardless of which
+ // side is the base.
+ for (const Expr *SubExpr : {E->getLHS(), E->getRHS()}) {
+ if (SubExpr == E->getBase() ? !evaluatePointer(SubExpr, Result)
+ : !EvaluateInteger(SubExpr, Index, Info)) {
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
+ }
+ }
return Success &&
HandleLValueArrayAdjustment(Info, E, Result, E->getType(), Index);
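
An observable consequence, sketched under -std=c++17:

    constexpr int f() {
      int a[2] = {5, 7};
      int i = 0;
      return (i = 1, a)[i]; // the left operand is evaluated first, so 'i' is
                            // already 1 when the index is read: yields 7
    }
    static_assert(f() == 7, "");
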
@@ -8023,16 +8406,18 @@ bool LValueExprEvaluator::VisitCompoundAssignOperator(
if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(CAO);
- APValue RHS;
+ bool Success = true;
- // The overall lvalue result is the result of evaluating the LHS.
- if (!this->Visit(CAO->getLHS())) {
- if (Info.noteFailure())
- Evaluate(RHS, this->Info, CAO->getRHS());
- return false;
+  // C++17 and later require that we evaluate the RHS first.
+ APValue RHS;
+ if (!Evaluate(RHS, this->Info, CAO->getRHS())) {
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
}
- if (!Evaluate(RHS, this->Info, CAO->getRHS()))
+ // The overall lvalue result is the result of evaluating the LHS.
+ if (!this->Visit(CAO->getLHS()) || !Success)
return false;
return handleCompoundAssignment(
@@ -8045,15 +8430,17 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(E);
- APValue NewVal;
+ bool Success = true;
- if (!this->Visit(E->getLHS())) {
- if (Info.noteFailure())
- Evaluate(NewVal, this->Info, E->getRHS());
- return false;
+  // C++17 and later require that we evaluate the RHS first.
+ APValue NewVal;
+ if (!Evaluate(NewVal, this->Info, E->getRHS())) {
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
}
- if (!Evaluate(NewVal, this->Info, E->getRHS()))
+ if (!this->Visit(E->getLHS()) || !Success)
return false;
if (Info.getLangOpts().CPlusPlus20 &&
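
Likewise for plain assignment; a sketch under -std=c++17:

    constexpr int g() {
      int a[2] = {};
      int i = 0;
      a[i] = ++i; // RHS first: 'i' is 1 by the time the subscript on the LHS
                  // is evaluated, so this assigns a[1] = 1
      return a[1];
    }
    static_assert(g() == 1, "");
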
@@ -8280,6 +8667,7 @@ public:
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
bool InvalidBaseOK) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->hasPointerRepresentation());
return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
}
@@ -8409,7 +8797,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
return false;
} else {
APValue &Value = Info.CurrentCall->createTemporary(
- SubExpr, SubExpr->getType(), false, Result);
+ SubExpr, SubExpr->getType(), ScopeKind::FullExpression, Result);
if (!EvaluateInPlace(Value, Info, Result, SubExpr))
return false;
}
@@ -8974,6 +9362,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
const Expr *Init = E->getInitializer();
const InitListExpr *ResizedArrayILE = nullptr;
const CXXConstructExpr *ResizedArrayCCE = nullptr;
+ bool ValueInit = false;
QualType AllocType = E->getAllocatedType();
if (Optional<const Expr*> ArraySize = E->getArraySize()) {
@@ -9017,7 +9406,14 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// -- the new-initializer is a braced-init-list and the number of
// array elements for which initializers are provided [...]
// exceeds the number of elements to initialize
- if (Init && !isa<CXXConstructExpr>(Init)) {
+ if (!Init) {
+ // No initialization is performed.
+ } else if (isa<CXXScalarValueInitExpr>(Init) ||
+ isa<ImplicitValueInitExpr>(Init)) {
+ ValueInit = true;
+ } else if (auto *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ ResizedArrayCCE = CCE;
+ } else {
auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType());
assert(CAT && "unexpected type for array initializer");
@@ -9040,8 +9436,6 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// special handling for this case when we initialize.
if (InitBound != AllocBound)
ResizedArrayILE = cast<InitListExpr>(Init);
- } else if (Init) {
- ResizedArrayCCE = cast<CXXConstructExpr>(Init);
}
AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
@@ -9102,7 +9496,11 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
return false;
}
- if (ResizedArrayILE) {
+ if (ValueInit) {
+ ImplicitValueInitExpr VIE(AllocType);
+ if (!EvaluateInPlace(*Val, Info, Result, &VIE))
+ return false;
+ } else if (ResizedArrayILE) {
if (!EvaluateArrayNewInitList(Info, Result, *Val, ResizedArrayILE,
AllocType))
return false;
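
A sketch of the newly handled value-initialization form, assuming -std=c++20
(where constant-evaluated 'new' is permitted):

    constexpr int zeroed() {
      int *p = new int(); // value-initialization: '*p' is 0
      int r = *p;
      delete p;
      return r;
    }
    static_assert(zeroed() == 0, "");
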
@@ -9157,6 +9555,7 @@ public:
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isMemberPointerType());
return MemberPointerExprEvaluator(Info, Result).Visit(E);
}
@@ -9285,7 +9684,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
for (const auto *I : RD->fields()) {
// -- if T is a reference type, no initialization is performed.
- if (I->getType()->isReferenceType())
+ if (I->isUnnamedBitfield() || I->getType()->isReferenceType())
continue;
LValue Subobject = This;
@@ -9308,6 +9707,8 @@ bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
// C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
// object's first non-static named data member is zero-initialized
RecordDecl::field_iterator I = RD->field_begin();
+ while (I != RD->field_end() && (*I)->isUnnamedBitfield())
+ ++I;
if (I == RD->field_end()) {
Result = APValue((const FieldDecl*)nullptr);
return true;
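
For example, a sketch:

    union U {
      int : 8; // unnamed bit-field: not a member, skipped
      char c;  // first named member: zero-initialized and made active
    };
    constexpr U u{};
    static_assert(u.c == 0, "");
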
@@ -9631,6 +10032,7 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
static bool EvaluateRecord(const Expr *E, const LValue &This,
APValue &Result, EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isRecordType() &&
"can't evaluate expression as a record rvalue");
return RecordExprEvaluator(Info, This, Result).Visit(E);
@@ -9652,8 +10054,8 @@ public:
/// Visit an expression which constructs the value of this temporary.
bool VisitConstructExpr(const Expr *E) {
- APValue &Value =
- Info.CurrentCall->createTemporary(E, E->getType(), false, Result);
+ APValue &Value = Info.CurrentCall->createTemporary(
+ E, E->getType(), ScopeKind::FullExpression, Result);
return EvaluateInPlace(Value, Info, Result, E);
}
@@ -9686,6 +10088,7 @@ public:
/// Evaluate an expression of record type as a temporary.
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isRecordType());
return TemporaryExprEvaluator(Info, Result).Visit(E);
}
@@ -9930,8 +10333,7 @@ namespace {
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(E->getType());
if (!CAT) {
- if (const IncompleteArrayType *IAT =
- Info.Ctx.getAsIncompleteArrayType(E->getType())) {
+ if (E->getType()->isIncompleteArrayType()) {
// We can be asked to zero-initialize a flexible array member; this
// is represented as an ImplicitValueInitExpr of incomplete array
// type. In this case, the array has zero elements.
@@ -9973,6 +10375,7 @@ namespace {
static bool EvaluateArray(const Expr *E, const LValue &This,
APValue &Result, EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
return ArrayExprEvaluator(Info, This, Result).Visit(E);
}
@@ -9980,6 +10383,7 @@ static bool EvaluateArray(const Expr *E, const LValue &This,
static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
APValue &Result, const InitListExpr *ILE,
QualType AllocType) {
+ assert(!ILE->isValueDependent());
assert(ILE->isRValue() && ILE->getType()->isArrayType() &&
"not an array rvalue");
return ArrayExprEvaluator(Info, This, Result)
@@ -9990,6 +10394,7 @@ static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
APValue &Result,
const CXXConstructExpr *CCE,
QualType AllocType) {
+ assert(!CCE->isValueDependent());
assert(CCE->isRValue() && CCE->getType()->isArrayType() &&
"not an array rvalue");
return ArrayExprEvaluator(Info, This, Result)
@@ -10091,8 +10496,8 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
if (E->getCommonExpr() &&
!Evaluate(Info.CurrentCall->createTemporary(
E->getCommonExpr(),
- getStorageType(Info.Ctx, E->getCommonExpr()), false,
- CommonLV),
+ getStorageType(Info.Ctx, E->getCommonExpr()),
+ ScopeKind::FullExpression, CommonLV),
Info, E->getCommonExpr()->getSourceExpr()))
return false;
@@ -10367,11 +10772,13 @@ class FixedPointExprEvaluator
/// like char*).
static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
return IntExprEvaluator(Info, Result).Visit(E);
}
static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
+ assert(!E->isValueDependent());
APValue Val;
if (!EvaluateIntegerOrLValue(E, Val, Info))
return false;
@@ -10393,6 +10800,7 @@ bool IntExprEvaluator::VisitSourceLocExpr(const SourceLocExpr *E) {
static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
if (E->getType()->isFixedPointType()) {
APValue Val;
if (!FixedPointExprEvaluator(Info, Val).Visit(E))
@@ -10408,6 +10816,7 @@ static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
if (E->getType()->isIntegerType()) {
auto FXSema = Info.Ctx.getFixedPointSemantics(E->getType());
APSInt Val;
@@ -10559,6 +10968,9 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
return GCCTypeClass::None;
case BuiltinType::Dependent:
@@ -11152,6 +11564,17 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(AlignedVal, E);
}
+ case Builtin::BI__builtin_bitreverse8:
+ case Builtin::BI__builtin_bitreverse16:
+ case Builtin::BI__builtin_bitreverse32:
+ case Builtin::BI__builtin_bitreverse64: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ return Success(Val.reverseBits(), E);
+ }
+
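// Illustration (assumed usage): the bit-reversal builtins now fold in constant
// expressions.
static_assert(__builtin_bitreverse8(0x01) == 0x80);
static_assert(__builtin_bitreverse32(0x12345678) == 0x1E6A2C48);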
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
@@ -11319,6 +11742,40 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Val.countPopulation(), E);
}
+ case Builtin::BI__builtin_rotateleft8:
+ case Builtin::BI__builtin_rotateleft16:
+ case Builtin::BI__builtin_rotateleft32:
+ case Builtin::BI__builtin_rotateleft64:
+ case Builtin::BI_rotl8: // Microsoft variants of rotate left

+ case Builtin::BI_rotl16:
+ case Builtin::BI_rotl:
+ case Builtin::BI_lrotl:
+ case Builtin::BI_rotl64: {
+ APSInt Val, Amt;
+ if (!EvaluateInteger(E->getArg(0), Val, Info) ||
+ !EvaluateInteger(E->getArg(1), Amt, Info))
+ return false;
+
+ return Success(Val.rotl(Amt.urem(Val.getBitWidth())), E);
+ }
+
+ case Builtin::BI__builtin_rotateright8:
+ case Builtin::BI__builtin_rotateright16:
+ case Builtin::BI__builtin_rotateright32:
+ case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI_rotr8: // Microsoft variants of rotate right
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64: {
+ APSInt Val, Amt;
+ if (!EvaluateInteger(E->getArg(0), Val, Info) ||
+ !EvaluateInteger(E->getArg(1), Amt, Info))
+ return false;
+
+ return Success(Val.rotr(Amt.urem(Val.getBitWidth())), E);
+ }
+
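// Illustration (assumed usage): the rotate builtins now fold; note that the
// rotation amount is reduced modulo the bit width via Amt.urem(BitWidth).
static_assert(__builtin_rotateleft8(0x80, 1) == 0x01);
static_assert(__builtin_rotateright32(1, 1) == 0x80000000);
static_assert(__builtin_rotateleft8(0x01, 9) == 0x02);   // 9 % 8 == 1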
case Builtin::BIstrlen:
case Builtin::BIwcslen:
// A call to strlen is not a constant expression.
@@ -11497,8 +11954,8 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return false;
// For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
- // of two less than the maximum inline atomic width, we know it is
- // lock-free. If the size isn't a power of two, or greater than the
+ // of two less than or equal to the maximum inline atomic width, we know it
+ // is lock-free. If the size isn't a power of two, or greater than the
// maximum alignment where we promote atomics, we know it is not lock-free
// (at least not in the sense of atomic_is_lock_free). Otherwise,
// the answer can only be determined at runtime; for example, 16-byte
@@ -12027,6 +12484,7 @@ template <class SuccessCB, class AfterCB>
static bool
EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
SuccessCB &&Success, AfterCB &&DoAfter) {
+ assert(!E->isValueDependent());
assert(E->isComparisonOp() && "expected comparison operator");
assert((E->getOpcode() == BO_Cmp ||
E->getType()->isIntegralOrEnumerationType()) &&
@@ -12127,8 +12585,16 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
return false;
assert(E->isComparisonOp() && "Invalid binary operator!");
+ llvm::APFloatBase::cmpResult APFloatCmpResult = LHS.compare(RHS);
+ if (!Info.InConstantContext &&
+ APFloatCmpResult == APFloat::cmpUnordered &&
+ E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).isFPConstrained()) {
+ // Note: Compares may raise invalid in some cases involving NaN or sNaN.
+ Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
+ return false;
+ }
auto GetCmpRes = [&]() {
- switch (LHS.compare(RHS)) {
+ switch (APFloatCmpResult) {
case APFloat::cmpEqual:
return CmpResult::Equal;
case APFloat::cmpLessThan:
@@ -12363,7 +12829,8 @@ bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) {
LV.set(VD);
if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
return false;
- return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result,
+ ConstantExprKind::Normal);
};
return EvaluateComparisonBinaryOperator(Info, E, OnSuccess, [&]() {
return ExprEvaluatorBaseTy::VisitBinCmp(E);
@@ -12709,6 +13176,8 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_IntegralToFixedPoint:
llvm_unreachable("invalid cast kind for integral value");
@@ -12953,6 +13422,26 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Success(IntResult, E);
}
+ case CK_FloatingToFixedPoint: {
+ APFloat Src(0.0);
+ if (!EvaluateFloat(SubExpr, Src, Info))
+ return false;
+
+ bool Overflowed;
+ APFixedPoint Result = APFixedPoint::getFromFloatValue(
+ Src, Info.Ctx.getFixedPointSemantics(DestType), &Overflowed);
+
+ if (Overflowed) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_fixedpoint_constant_overflow)
+ << Result.toString() << E->getType();
+ else if (!HandleOverflow(Info, E, Result, E->getType()))
+ return false;
+ }
+
+ return Success(Result, E);
+ }
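// Illustration (assumption: C compiled with -ffixed-point; fixed-point types
// are an Embedded-C extension): float-to-fixed-point conversions now fold,
// with overflow diagnosed or, for _Sat types, saturated.
_Accum a = 2.5;        // folded via APFixedPoint::getFromFloatValue
_Sat _Fract s = 2.0;   // saturates rather than overflowing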
case CK_NoOp:
case CK_LValueToRValue:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
@@ -13004,6 +13493,29 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
.convert(ResultFXSema, &ConversionOverflow);
break;
}
+ case BO_Shl:
+ case BO_Shr: {
+ FixedPointSemantics LHSSema = LHSFX.getSemantics();
+ llvm::APSInt RHSVal = RHSFX.getValue();
+
+ unsigned ShiftBW =
+ LHSSema.getWidth() - (unsigned)LHSSema.hasUnsignedPadding();
+ unsigned Amt = RHSVal.getLimitedValue(ShiftBW - 1);
+ // Embedded-C 4.1.6.2.2:
+ // The right operand must be nonnegative and less than the total number
+ // of (nonpadding) bits of the fixed-point operand ...
+ if (RHSVal.isNegative())
+ Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHSVal;
+ else if (Amt != RHSVal)
+ Info.CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHSVal << E->getType() << ShiftBW;
+
+ if (E->getOpcode() == BO_Shl)
+ Result = LHSFX.shl(Amt, &OpOverflow);
+ else
+ Result = LHSFX.shr(Amt, &OpOverflow);
+ break;
+ }
default:
return false;
}
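// Illustration (assumption: C compiled with -ffixed-point): fixed-point shifts
// now fold, diagnosing negative or over-wide shift amounts per Embedded-C
// 4.1.6.2.2.
_Accum x = 0.5k << 2;   // folds to 2.0k
_Accum y = 2.0k >> 1;   // folds to 1.0k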
@@ -13055,6 +13567,7 @@ public:
} // end anonymous namespace
static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isRealFloatingType());
return FloatExprEvaluator(Info, Result).Visit(E);
}
@@ -13140,6 +13653,11 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsl:
case Builtin::BI__builtin_fabsf128:
+ // The C standard says "fabs raises no floating-point exceptions,
+ // even if x is a signaling NaN. The returned value is independent of
+ // the current rounding direction mode." Therefore constant folding can
+ // proceed without regard to the floating point settings.
+ // Reference: WG14 N2478 F.10.4.3
if (!EvaluateFloat(E->getArg(0), Result, Info))
return false;
@@ -13198,6 +13716,9 @@ bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
case UO_Plus:
return EvaluateFloat(E->getSubExpr(), Result, Info);
case UO_Minus:
+ // Per the C standard (WG14 N2478 F.3 p4), "the unary - raises no
+ // floating-point exceptions, even if the operand is signaling."
if (!EvaluateFloat(E->getSubExpr(), Result, Info))
return false;
Result.changeSign();
@@ -13231,9 +13752,20 @@ bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntegralToFloating: {
APSInt IntResult;
+ const FPOptions FPO = E->getFPFeaturesInEffect(
+ Info.Ctx.getLangOpts());
return EvaluateInteger(SubExpr, IntResult, Info) &&
- HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult,
- E->getType(), Result);
+ HandleIntToFloatCast(Info, E, FPO, SubExpr->getType(),
+ IntResult, E->getType(), Result);
+ }
+
+ case CK_FixedPointToFloating: {
+ APFixedPoint FixResult(Info.Ctx.getFixedPointSemantics(SubExpr->getType()));
+ if (!EvaluateFixedPoint(SubExpr, FixResult, Info))
+ return false;
+ Result =
+ FixResult.convertToFloat(Info.Ctx.getFloatTypeSemantics(E->getType()));
+ return true;
}
case CK_FloatingCast: {
@@ -13282,11 +13814,13 @@ public:
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitUnaryOperator(const UnaryOperator *E);
bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCallExpr(const CallExpr *E);
};
} // end anonymous namespace
static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isAnyComplexType());
return ComplexExprEvaluator(Info, Result).Visit(E);
}
@@ -13380,6 +13914,8 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
@@ -13460,13 +13996,15 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (!Visit(E->getSubExpr()))
return false;
+ const FPOptions FPO = E->getFPFeaturesInEffect(
+ Info.Ctx.getLangOpts());
QualType To = E->getType()->castAs<ComplexType>()->getElementType();
QualType From
= E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
Result.makeComplexFloat();
- return HandleIntToFloatCast(Info, E, From, Result.IntReal,
+ return HandleIntToFloatCast(Info, E, FPO, From, Result.IntReal,
To, Result.FloatReal) &&
- HandleIntToFloatCast(Info, E, From, Result.IntImag,
+ HandleIntToFloatCast(Info, E, FPO, From, Result.IntImag,
To, Result.FloatImag);
}
}
@@ -13750,6 +14288,23 @@ bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
return ExprEvaluatorBaseTy::VisitInitListExpr(E);
}
+bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (E->getBuiltinCallee()) {
+ case Builtin::BI__builtin_complex:
+ Result.makeComplexFloat();
+ if (!EvaluateFloat(E->getArg(0), Result.FloatReal, Info))
+ return false;
+ if (!EvaluateFloat(E->getArg(1), Result.FloatImag, Info))
+ return false;
+ return true;
+
+ default:
+ break;
+ }
+
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+}
+
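// Illustration (assumed usage): __builtin_complex, the builtin behind C's
// CMPLX macro, now folds in constant evaluation; it can form values an
// ordinary `re + im * I` expression cannot, such as a negative-zero
// imaginary part.
constexpr _Complex double z = __builtin_complex(0.0, -0.0);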
//===----------------------------------------------------------------------===//
// Atomic expression evaluation, essentially just handling the NonAtomicToAtomic
// implicit conversion.
@@ -13792,6 +14347,7 @@ public:
static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isAtomicType());
return AtomicExprEvaluator(Info, This, Result).Visit(E);
}
@@ -13916,6 +14472,7 @@ bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
}
static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
+ assert(!E->isValueDependent());
assert(E->isRValue() && E->getType()->isVoidType());
return VoidExprEvaluator(Info).Visit(E);
}
@@ -13925,6 +14482,7 @@ static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
//===----------------------------------------------------------------------===//
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
+ assert(!E->isValueDependent());
// In C, function designators are not lvalues, but we evaluate them as if they
// are.
QualType T = E->getType();
@@ -13965,13 +14523,14 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
} else if (T->isArrayType()) {
LValue LV;
APValue &Value =
- Info.CurrentCall->createTemporary(E, T, false, LV);
+ Info.CurrentCall->createTemporary(E, T, ScopeKind::FullExpression, LV);
if (!EvaluateArray(E, LV, Value, Info))
return false;
Result = Value;
} else if (T->isRecordType()) {
LValue LV;
- APValue &Value = Info.CurrentCall->createTemporary(E, T, false, LV);
+ APValue &Value =
+ Info.CurrentCall->createTemporary(E, T, ScopeKind::FullExpression, LV);
if (!EvaluateRecord(E, LV, Value, Info))
return false;
Result = Value;
@@ -13985,7 +14544,8 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
QualType Unqual = T.getAtomicUnqualifiedType();
if (Unqual->isArrayType() || Unqual->isRecordType()) {
LValue LV;
- APValue &Value = Info.CurrentCall->createTemporary(E, Unqual, false, LV);
+ APValue &Value = Info.CurrentCall->createTemporary(
+ E, Unqual, ScopeKind::FullExpression, LV);
if (!EvaluateAtomic(E, &LV, Value, Info))
return false;
} else {
@@ -14035,6 +14595,7 @@ static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
/// lvalue-to-rvalue cast if it is an lvalue.
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
+ assert(!E->isValueDependent());
if (Info.EnableNewConstInterp) {
if (!Info.Ctx.getInterpContext().evaluateAsRValue(Info, E, Result))
return false;
@@ -14057,7 +14618,8 @@ static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
}
// Check this core constant expression is a constant expression.
- return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result) &&
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result,
+ ConstantExprKind::Normal) &&
CheckMemoryLeaks(Info);
}
@@ -14098,6 +14660,7 @@ static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result,
const ASTContext &Ctx, EvalInfo &Info) {
+ assert(!E->isValueDependent());
bool IsConst;
if (FastEvaluateAsRValue(E, Result, Ctx, IsConst))
return IsConst;
@@ -14109,6 +14672,7 @@ static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
const ASTContext &Ctx,
Expr::SideEffectsKind AllowSideEffects,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
if (!E->getType()->isIntegralOrEnumerationType())
return false;
@@ -14124,6 +14688,7 @@ static bool EvaluateAsFixedPoint(const Expr *E, Expr::EvalResult &ExprResult,
const ASTContext &Ctx,
Expr::SideEffectsKind AllowSideEffects,
EvalInfo &Info) {
+ assert(!E->isValueDependent());
if (!E->getType()->isFixedPointType())
return false;
@@ -14212,15 +14777,36 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
Result.HasSideEffects ||
!CheckLValueConstantExpression(Info, getExprLoc(),
Ctx.getLValueReferenceType(getType()), LV,
- Expr::EvaluateForCodeGen, CheckedTemps))
+ ConstantExprKind::Normal, CheckedTemps))
return false;
LV.moveInto(Result.Val);
return true;
}
-bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
- const ASTContext &Ctx, bool InPlace) const {
+static bool EvaluateDestruction(const ASTContext &Ctx, APValue::LValueBase Base,
+ APValue DestroyedValue, QualType Type,
+ SourceLocation Loc, Expr::EvalStatus &EStatus) {
+ EvalInfo Info(Ctx, EStatus, EvalInfo::EM_ConstantExpression);
+ Info.setEvaluatingDecl(Base, DestroyedValue,
+ EvalInfo::EvaluatingDeclKind::Dtor);
+ Info.InConstantContext = true;
+
+ LValue LVal;
+ LVal.set(Base);
+
+ if (!HandleDestruction(Info, Loc, Base, DestroyedValue, Type) ||
+ EStatus.HasSideEffects)
+ return false;
+
+ if (!Info.discardCleanups())
+ llvm_unreachable("Unhandled cleanup; missing full expression marker?");
+
+ return true;
+}
+
+bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
+ ConstantExprKind Kind) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
@@ -14228,27 +14814,50 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
EvalInfo Info(Ctx, Result, EM);
Info.InConstantContext = true;
- if (InPlace) {
- Info.setEvaluatingDecl(this, Result.Val);
- LValue LVal;
- LVal.set(this);
- if (!::EvaluateInPlace(Result.Val, Info, LVal, this) ||
- Result.HasSideEffects)
- return false;
- } else if (!::Evaluate(Result.Val, Info, this) || Result.HasSideEffects)
+ // The type of the object we're initializing is 'const T' for a class NTTP.
+ QualType T = getType();
+ if (Kind == ConstantExprKind::ClassTemplateArgument)
+ T.addConst();
+
+ // If we're evaluating a prvalue, fake up a MaterializeTemporaryExpr to
+ // represent the result of the evaluation. CheckConstantExpression ensures
+ // this doesn't escape.
+ MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr*>(this), true);
+ APValue::LValueBase Base(&BaseMTE);
+
+ Info.setEvaluatingDecl(Base, Result.Val);
+ LValue LVal;
+ LVal.set(Base);
+
+ if (!::EvaluateInPlace(Result.Val, Info, LVal, this) || Result.HasSideEffects)
return false;
if (!Info.discardCleanups())
llvm_unreachable("Unhandled cleanup; missing full expression marker?");
- return CheckConstantExpression(Info, getExprLoc(), getStorageType(Ctx, this),
- Result.Val, Usage) &&
- CheckMemoryLeaks(Info);
+ if (!CheckConstantExpression(Info, getExprLoc(), getStorageType(Ctx, this),
+ Result.Val, Kind))
+ return false;
+ if (!CheckMemoryLeaks(Info))
+ return false;
+
+ // If this is a class template argument, it's required to have constant
+ // destruction too.
+ if (Kind == ConstantExprKind::ClassTemplateArgument &&
+ (!EvaluateDestruction(Ctx, Base, Result.Val, T, getBeginLoc(), Result) ||
+ Result.HasSideEffects)) {
+ // FIXME: Prefix a note to indicate that the problem is lack of constant
+ // destruction.
+ return false;
+ }
+
+ return true;
}
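// Usage sketch (hypothetical caller; `E` and `Ctx` are assumed to be in
// scope): the old Usage/InPlace parameters are folded into a single
// ConstantExprKind, and class-type template arguments additionally require
// constant destruction.
Expr::EvalResult Result;
if (E->EvaluateAsConstantExpr(Result, Ctx,
                              ConstantExprKind::ClassTemplateArgument)) {
  // Result.Val now holds the evaluated APValue.
}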
bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
const VarDecl *VD,
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
@@ -14261,11 +14870,12 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
- EvalInfo Info(Ctx, EStatus, VD->isConstexpr()
- ? EvalInfo::EM_ConstantExpression
- : EvalInfo::EM_ConstantFold);
+ EvalInfo Info(Ctx, EStatus,
+ (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus11)
+ ? EvalInfo::EM_ConstantExpression
+ : EvalInfo::EM_ConstantFold);
Info.setEvaluatingDecl(VD, Value);
- Info.InConstantContext = true;
+ Info.InConstantContext = IsConstantInitialization;
SourceLocation DeclLoc = VD->getLocation();
QualType DeclTy = VD->getType();
@@ -14290,7 +14900,8 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
if (!Info.discardCleanups())
llvm_unreachable("Unhandled cleanup; missing full expression marker?");
}
- return CheckConstantExpression(Info, DeclLoc, DeclTy, Value) &&
+ return CheckConstantExpression(Info, DeclLoc, DeclTy, Value,
+ ConstantExprKind::Normal) &&
CheckMemoryLeaks(Info);
}
@@ -14308,24 +14919,11 @@ bool VarDecl::evaluateDestruction(
else if (!getDefaultInitValue(getType(), DestroyedValue))
return false;
- EvalInfo Info(getASTContext(), EStatus, EvalInfo::EM_ConstantExpression);
- Info.setEvaluatingDecl(this, DestroyedValue,
- EvalInfo::EvaluatingDeclKind::Dtor);
- Info.InConstantContext = true;
-
- SourceLocation DeclLoc = getLocation();
- QualType DeclTy = getType();
-
- LValue LVal;
- LVal.set(this);
-
- if (!HandleDestruction(Info, DeclLoc, LVal.Base, DestroyedValue, DeclTy) ||
+ if (!EvaluateDestruction(getASTContext(), this, std::move(DestroyedValue),
+ getType(), getLocation(), EStatus) ||
EStatus.HasSideEffects)
return false;
- if (!Info.discardCleanups())
- llvm_unreachable("Unhandled cleanup; missing full expression marker?");
-
ensureEvaluatedStmt()->HasConstantDestruction = true;
return true;
}
@@ -14396,7 +14994,6 @@ bool Expr::EvalResult::isGlobalLValue() const {
return IsGlobalLValue(Val.getLValueBase());
}
-
/// isIntegerConstantExpr - this recursive routine will test if an expression is
/// an integer constant expression.
@@ -14599,33 +15196,24 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
return CheckICE(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
Ctx);
case Expr::DeclRefExprClass: {
- if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+ if (isa<EnumConstantDecl>(D))
return NoDiag();
- const ValueDecl *D = cast<DeclRefExpr>(E)->getDecl();
- if (Ctx.getLangOpts().CPlusPlus &&
- D && IsConstNonVolatile(D->getType())) {
- // Parameter variables are never constants. Without this check,
- // getAnyInitializer() can find a default argument, which leads
- // to chaos.
- if (isa<ParmVarDecl>(D))
- return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
-
- // C++ 7.1.5.1p2
- // A variable of non-volatile const-qualified integral or enumeration
- // type initialized by an ICE can be used in ICEs.
- if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
- if (!Dcl->getType()->isIntegralOrEnumerationType())
- return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
-
- const VarDecl *VD;
- // Look for a declaration of this variable that has an initializer, and
- // check whether it is an ICE.
- if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
- return NoDiag();
- else
- return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
- }
- }
+
+ // C++ and OpenCL (FIXME: spec reference?) allow reading const-qualified
+ // integer variables in constant expressions:
+ //
+ // C++ 7.1.5.1p2
+ // A variable of non-volatile const-qualified integral or enumeration
+ // type initialized by an ICE can be used in ICEs.
+ //
+ // We sometimes use CheckICE to check the C++98 rules in C++11 mode. In
+ // that mode, use of reference variables should not be allowed.
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD && VD->isUsableInConstantExpressions(Ctx) &&
+ !VD->getType()->isReferenceType())
+ return NoDiag();
+
return ICEDiag(IK_NotICE, E->getBeginLoc());
}
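// Illustration of the rule implemented above: a non-volatile const integral
// variable initialized with a constant is usable in an ICE, so in C++ this
// declares a fixed-size array rather than a VLA.
const int N = 32;
int buffer[N];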
case Expr::UnaryOperatorClass: {
@@ -14893,16 +15481,22 @@ bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
return true;
}
-bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
- SourceLocation *Loc, bool isEvaluated) const {
+Optional<llvm::APSInt> Expr::getIntegerConstantExpr(const ASTContext &Ctx,
+ SourceLocation *Loc,
+ bool isEvaluated) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
- if (Ctx.getLangOpts().CPlusPlus11)
- return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
+ APSInt Value;
+
+ if (Ctx.getLangOpts().CPlusPlus11) {
+ if (EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc))
+ return Value;
+ return None;
+ }
if (!isIntegerConstantExpr(Ctx, Loc))
- return false;
+ return None;
// The only possible side-effects here are due to UB discovered in the
// evaluation (for instance, INT_MAX + 1). In such a case, we are still
@@ -14916,8 +15510,7 @@ bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
if (!::EvaluateAsInt(this, ExprResult, Ctx, SE_AllowSideEffects, Info))
llvm_unreachable("ICE cannot be evaluated!");
- Value = ExprResult.Val.getInt();
- return true;
+ return ExprResult.Val.getInt();
}
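// Usage sketch for the new Optional-returning form (hypothetical caller):
if (Optional<llvm::APSInt> Value = E->getIntegerConstantExpr(Ctx)) {
  uint64_t Bits = Value->getZExtValue();   // *Value is the evaluated APSInt
  (void)Bits;
}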
bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
@@ -14989,14 +15582,20 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
Info.EvalStatus.HasSideEffects = false;
}
- ArgVector ArgValues(Args.size());
+ CallRef Call = Info.CurrentCall->createCall(Callee);
for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
+ unsigned Idx = I - Args.begin();
+ if (Idx >= Callee->getNumParams())
+ break;
+ const ParmVarDecl *PVD = Callee->getParamDecl(Idx);
if ((*I)->isValueDependent() ||
- !Evaluate(ArgValues[I - Args.begin()], Info, *I) ||
- Info.EvalStatus.HasSideEffects)
+ !EvaluateCallArg(PVD, *I, Call, Info) ||
+ Info.EvalStatus.HasSideEffects) {
// If evaluation fails, throw away the argument entirely.
- ArgValues[I - Args.begin()] = APValue();
+ if (APValue *Slot = Info.getParamSlot(Call, PVD))
+ *Slot = APValue();
+ }
// Ignore any side-effects from a failed evaluation. This is safe because
// they can't interfere with any other argument evaluation.
@@ -15009,8 +15608,7 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
Info.EvalStatus.HasSideEffects = false;
// Build fake call to Callee.
- CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr,
- ArgValues.data());
+ CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, Call);
// FIXME: Missing ExprWithCleanups in enable_if conditions?
FullExpressionRAII Scope(Info);
return Evaluate(Value, Info, this) && Scope.destroy() &&
@@ -15026,15 +15624,6 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
if (FD->isDependentContext())
return true;
- // Bail out if a constexpr constructor has an initializer that contains an
- // error. We deliberately don't produce a diagnostic, as we have produced a
- // relevant diagnostic when parsing the error initializer.
- if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(FD)) {
- for (const auto *InitExpr : Ctor->inits()) {
- if (InitExpr->getInit() && InitExpr->getInit()->containsErrors())
- return false;
- }
- }
Expr::EvalStatus Status;
Status.Diag = &Diags;
@@ -15068,7 +15657,7 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
} else {
SourceLocation Loc = FD->getLocation();
HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
- Args, FD->getBody(), Info, Scratch, nullptr);
+ Args, CallRef(), FD->getBody(), Info, Scratch, nullptr);
}
return Diags.empty();
@@ -15090,13 +15679,7 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
Info.CheckingPotentialConstantExpression = true;
// Fabricate a call stack frame to give the arguments a plausible cover story.
- ArrayRef<const Expr*> Args;
- ArgVector ArgValues(0);
- bool Success = EvaluateArgs(Args, ArgValues, Info, FD);
- (void)Success;
- assert(Success &&
- "Failed to set up arguments for potential constant evaluation");
- CallStackFrame Frame(Info, SourceLocation(), FD, nullptr, ArgValues.data());
+ CallStackFrame Frame(Info, SourceLocation(), FD, /*This*/ nullptr, CallRef());
APValue ResultScratch;
Evaluate(ResultScratch, Info, E);
diff --git a/clang/lib/AST/Interp/Disasm.cpp b/clang/lib/AST/Interp/Disasm.cpp
index 293fdd4b3256..c1c18f832d4f 100644
--- a/clang/lib/AST/Interp/Disasm.cpp
+++ b/clang/lib/AST/Interp/Disasm.cpp
@@ -26,10 +26,10 @@ LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
if (F) {
if (auto *Cons = dyn_cast<CXXConstructorDecl>(F)) {
- const std::string &Name = Cons->getParent()->getNameAsString();
+ DeclarationName Name = Cons->getParent()->getDeclName();
OS << Name << "::" << Name << ":\n";
} else {
- OS << F->getNameAsString() << ":\n";
+ OS << F->getDeclName() << ":\n";
}
} else {
OS << "<<expr>>\n";
diff --git a/clang/lib/AST/Interp/State.cpp b/clang/lib/AST/Interp/State.cpp
index 692cc2e8d69b..56774f88fb45 100644
--- a/clang/lib/AST/Interp/State.cpp
+++ b/clang/lib/AST/Interp/State.cpp
@@ -150,7 +150,7 @@ void State::addCallStack(unsigned Limit) {
}
}
- SmallVector<char, 128> Buffer;
+ SmallString<128> Buffer;
llvm::raw_svector_ostream Out(Buffer);
F->describe(Out);
addDiag(CallLocation, diag::note_constexpr_call_here) << Out.str();
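// Minimal sketch: SmallString<128> is a SmallVector<char, 128> with StringRef
// conveniences, so it still plugs into raw_svector_ostream unchanged.
llvm::SmallString<128> Buffer;
llvm::raw_svector_ostream OS(Buffer);
OS << "example";
llvm::StringRef Text = OS.str();   // no copy; views Buffer's storage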
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index ddfbe9f86499..6c8d5687c64a 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -127,9 +127,8 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext {
public:
explicit ItaniumMangleContextImpl(ASTContext &Context,
- DiagnosticsEngine &Diags,
- bool IsUniqueNameMangler)
- : ItaniumMangleContext(Context, Diags, IsUniqueNameMangler) {}
+ DiagnosticsEngine &Diags)
+ : ItaniumMangleContext(Context, Diags) {}
/// @name Mangler Entry Points
/// @{
@@ -480,8 +479,6 @@ private:
const AbiTagList *AdditionalAbiTags);
void mangleUnscopedTemplateName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedTemplateName(TemplateName,
- const AbiTagList *AdditionalAbiTags);
void mangleSourceName(const IdentifierInfo *II);
void mangleRegCallName(const IdentifierInfo *II);
void mangleDeviceStubName(const IdentifierInfo *II);
@@ -531,8 +528,14 @@ private:
void mangleNeonVectorType(const DependentVectorType *T);
void mangleAArch64NeonVectorType(const VectorType *T);
void mangleAArch64NeonVectorType(const DependentVectorType *T);
+ void mangleAArch64FixedSveVectorType(const VectorType *T);
+ void mangleAArch64FixedSveVectorType(const DependentVectorType *T);
void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
+ void mangleFloatLiteral(QualType T, const llvm::APFloat &V);
+ void mangleFixedPointLiteral();
+ void mangleNullPointer(QualType T);
+
void mangleMemberExprBase(const Expr *base, bool isArrow);
void mangleMemberExpr(const Expr *base, bool isArrow,
NestedNameSpecifier *qualifier,
@@ -548,12 +551,15 @@ private:
void mangleCXXCtorType(CXXCtorType T, const CXXRecordDecl *InheritedFrom);
void mangleCXXDtorType(CXXDtorType T);
- void mangleTemplateArgs(const TemplateArgumentLoc *TemplateArgs,
+ void mangleTemplateArgs(TemplateName TN,
+ const TemplateArgumentLoc *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+ void mangleTemplateArgs(TemplateName TN, const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleTemplateArgs(const TemplateArgumentList &AL);
- void mangleTemplateArg(TemplateArgument A);
+ void mangleTemplateArgs(TemplateName TN, const TemplateArgumentList &AL);
+ void mangleTemplateArg(TemplateArgument A, bool NeedExactType);
+ void mangleValueInTemplateArg(QualType T, const APValue &V, bool TopLevel,
+ bool NeedExactType = false);
void mangleTemplateParameter(unsigned Depth, unsigned Index);
@@ -648,15 +654,12 @@ void CXXNameMangler::mangle(GlobalDecl GD) {
Out << "_Z";
if (isa<FunctionDecl>(GD.getDecl()))
mangleFunctionEncoding(GD);
- else if (const VarDecl *VD = dyn_cast<VarDecl>(GD.getDecl()))
- mangleName(VD);
+ else if (isa<VarDecl, FieldDecl, MSGuidDecl, TemplateParamObjectDecl,
+ BindingDecl>(GD.getDecl()))
+ mangleName(GD);
else if (const IndirectFieldDecl *IFD =
dyn_cast<IndirectFieldDecl>(GD.getDecl()))
mangleName(IFD->getAnonField());
- else if (const FieldDecl *FD = dyn_cast<FieldDecl>(GD.getDecl()))
- mangleName(FD);
- else if (const MSGuidDecl *GuidD = dyn_cast<MSGuidDecl>(GD.getDecl()))
- mangleName(GuidD);
else
llvm_unreachable("unexpected kind of global decl");
}
@@ -822,6 +825,11 @@ isTemplate(GlobalDecl GD, const TemplateArgumentList *&TemplateArgs) {
return GlobalDecl();
}
+static TemplateName asTemplateName(GlobalDecl GD) {
+ const TemplateDecl *TD = dyn_cast_or_null<TemplateDecl>(GD.getDecl());
+ return TemplateName(const_cast<TemplateDecl*>(TD));
+}
+
void CXXNameMangler::mangleName(GlobalDecl GD) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
@@ -898,7 +906,7 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
const TemplateArgumentList *TemplateArgs = nullptr;
if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
mangleUnscopedTemplateName(TD, AdditionalAbiTags);
- mangleTemplateArgs(*TemplateArgs);
+ mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
return;
}
@@ -951,7 +959,7 @@ void CXXNameMangler::mangleTemplateName(const TemplateDecl *TD,
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
mangleUnscopedTemplateName(TD, nullptr);
- mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+ mangleTemplateArgs(asTemplateName(TD), TemplateArgs, NumTemplateArgs);
} else {
mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
}
@@ -991,29 +999,6 @@ void CXXNameMangler::mangleUnscopedTemplateName(
addSubstitution(ND);
}
-void CXXNameMangler::mangleUnscopedTemplateName(
- TemplateName Template, const AbiTagList *AdditionalAbiTags) {
- // <unscoped-template-name> ::= <unscoped-name>
- // ::= <substitution>
- if (TemplateDecl *TD = Template.getAsTemplateDecl())
- return mangleUnscopedTemplateName(TD, AdditionalAbiTags);
-
- if (mangleSubstitution(Template))
- return;
-
- assert(!AdditionalAbiTags &&
- "dependent template name cannot have abi tags");
-
- DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
- assert(Dependent && "Not a dependent template name?");
- if (const IdentifierInfo *Id = Dependent->getIdentifier())
- mangleSourceName(Id);
- else
- mangleOperatorName(Dependent->getOperator(), UnknownArity);
-
- addSubstitution(Template);
-}
-
void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
// ABI:
// Floating-point literals are encoded using a fixed-length
@@ -1056,6 +1041,27 @@ void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
Out.write(buffer.data(), numCharacters);
}
+void CXXNameMangler::mangleFloatLiteral(QualType T, const llvm::APFloat &V) {
+ Out << 'L';
+ mangleType(T);
+ mangleFloat(V);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleFixedPointLiteral() {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle fixed point literals yet");
+ Diags.Report(DiagID);
+}
+
+void CXXNameMangler::mangleNullPointer(QualType T) {
+ // <expr-primary> ::= L <type> 0 E
+ Out << 'L';
+ mangleType(T);
+ Out << "0E";
+}
+
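// Illustration (assumed C++20 example): a floating-point non-type template
// argument now uses the <expr-primary> form L <type> <hex digits> E:
//
//   template<float> void f();
//   template void f<1.5f>();   // mangles as _Z1fILf3fc00000EEvv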
void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
if (Value.isSigned() && Value.isNegative()) {
Out << 'n';
@@ -1103,7 +1109,8 @@ void CXXNameMangler::manglePrefix(QualType type) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
+ TST->getNumArgs());
addSubstitution(QualType(TST, 0));
}
} else if (const auto *DTST =
@@ -1116,7 +1123,7 @@ void CXXNameMangler::manglePrefix(QualType type) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
+ mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs());
addSubstitution(QualType(DTST, 0));
}
} else {
@@ -1259,7 +1266,7 @@ void CXXNameMangler::mangleUnresolvedName(
// The <simple-id> and on <operator-name> productions end in an optional
// <template-args>.
if (TemplateArgs)
- mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+ mangleTemplateArgs(TemplateName(), TemplateArgs, NumTemplateArgs);
}
void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
@@ -1302,6 +1309,14 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
break;
}
+ if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
+ // Proposed in https://github.com/itanium-cxx-abi/cxx-abi/issues/63.
+ Out << "TA";
+ mangleValueInTemplateArg(TPO->getType().getUnqualifiedType(),
+ TPO->getValue(), /*TopLevel=*/true);
+ break;
+ }
+
if (II) {
// Match GCC's naming convention for internal linkage symbols, for
// symbols that are not actually visible outside of this TU. GCC
@@ -1409,8 +1424,7 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// <lambda-sig> ::= <template-param-decl>* <parameter-type>+
// # Parameter types or 'v' for 'void'.
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
- if (Record->isLambda() && (Record->getLambdaManglingNumber() ||
- Context.isUniqueNameMangler())) {
+ if (Record->isLambda() && Record->getLambdaManglingNumber()) {
assert(!AdditionalAbiTags &&
"Lambda type cannot have additional abi tags");
mangleLambda(Record);
@@ -1451,10 +1465,13 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
case DeclarationName::CXXConstructorName: {
const CXXRecordDecl *InheritedFrom = nullptr;
+ TemplateName InheritedTemplateName;
const TemplateArgumentList *InheritedTemplateArgs = nullptr;
if (auto Inherited =
cast<CXXConstructorDecl>(ND)->getInheritedConstructor()) {
InheritedFrom = Inherited.getConstructor()->getParent();
+ InheritedTemplateName =
+ TemplateName(Inherited.getConstructor()->getPrimaryTemplate());
InheritedTemplateArgs =
Inherited.getConstructor()->getTemplateSpecializationArgs();
}
@@ -1471,7 +1488,7 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// FIXME: The template arguments are part of the enclosing prefix or
// nested-name, but it's more convenient to mangle them here.
if (InheritedTemplateArgs)
- mangleTemplateArgs(*InheritedTemplateArgs);
+ mangleTemplateArgs(InheritedTemplateName, *InheritedTemplateArgs);
writeAbiTags(ND, AdditionalAbiTags);
break;
@@ -1560,7 +1577,7 @@ void CXXNameMangler::mangleNestedName(GlobalDecl GD,
const TemplateArgumentList *TemplateArgs = nullptr;
if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
mangleTemplatePrefix(TD, NoFunction);
- mangleTemplateArgs(*TemplateArgs);
+ mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
}
else {
manglePrefix(DC, NoFunction);
@@ -1577,7 +1594,7 @@ void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
Out << 'N';
mangleTemplatePrefix(TD);
- mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+ mangleTemplateArgs(asTemplateName(TD), TemplateArgs, NumTemplateArgs);
Out << 'E';
}
@@ -1785,37 +1802,6 @@ void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) {
}
}
-// Handles the __builtin_unique_stable_name feature for lambdas. Instead of the
-// ordinal of the lambda in its mangling, this does line/column to uniquely and
-// reliably identify the lambda. Additionally, macro expansions are expressed
-// as well to prevent macros causing duplicates.
-static void mangleUniqueNameLambda(CXXNameMangler &Mangler, SourceManager &SM,
- raw_ostream &Out,
- const CXXRecordDecl *Lambda) {
- SourceLocation Loc = Lambda->getLocation();
-
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- Mangler.mangleNumber(PLoc.getLine());
- Out << "_";
- Mangler.mangleNumber(PLoc.getColumn());
-
- while(Loc.isMacroID()) {
- SourceLocation SLToPrint = Loc;
- if (SM.isMacroArgExpansion(Loc))
- SLToPrint = SM.getImmediateExpansionRange(Loc).getBegin();
-
- PLoc = SM.getPresumedLoc(SM.getSpellingLoc(SLToPrint));
- Out << "m";
- Mangler.mangleNumber(PLoc.getLine());
- Out << "_";
- Mangler.mangleNumber(PLoc.getColumn());
-
- Loc = SM.getImmediateMacroCallerLoc(Loc);
- if (Loc.isFileID())
- Loc = SM.getImmediateMacroCallerLoc(SLToPrint);
- }
-}
-
void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
// If the context of a closure type is an initializer for a class member
// (static or nonstatic), it is encoded in a qualified name with a final
@@ -1835,8 +1821,8 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
= cast<NamedDecl>(Context)->getIdentifier()) {
mangleSourceName(Name);
const TemplateArgumentList *TemplateArgs = nullptr;
- if (isTemplate(cast<NamedDecl>(Context), TemplateArgs))
- mangleTemplateArgs(*TemplateArgs);
+ if (GlobalDecl TD = isTemplate(cast<NamedDecl>(Context), TemplateArgs))
+ mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
Out << 'M';
}
}
@@ -1846,12 +1832,6 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
mangleLambdaSig(Lambda);
Out << "E";
- if (Context.isUniqueNameMangler()) {
- mangleUniqueNameLambda(
- *this, Context.getASTContext().getSourceManager(), Out, Lambda);
- return;
- }
-
// The number is omitted for the first closure type with a given
// <lambda-sig> in a given context; it is n-2 for the nth closure type
// (in lexical order) with that same <lambda-sig> and context.
@@ -1933,7 +1913,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
const TemplateArgumentList *TemplateArgs = nullptr;
if (GlobalDecl TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
- mangleTemplateArgs(*TemplateArgs);
+ mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
} else {
manglePrefix(getEffectiveDeclContext(ND), NoFunction);
mangleUnqualifiedName(ND, nullptr);
@@ -1949,21 +1929,28 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
if (TemplateDecl *TD = Template.getAsTemplateDecl())
return mangleTemplatePrefix(TD);
- if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
- manglePrefix(Qualified->getQualifier());
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "unexpected template name kind");
- if (OverloadedTemplateStorage *Overloaded
- = Template.getAsOverloadedTemplate()) {
- mangleUnqualifiedName(GlobalDecl(), (*Overloaded->begin())->getDeclName(),
- UnknownArity, nullptr);
+ // Clang 11 and before mangled the substitution for a dependent template name
+ // after already having emitted (a substitution for) the prefix.
+ bool Clang11Compat = getASTContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver11;
+ if (!Clang11Compat && mangleSubstitution(Template))
return;
- }
- DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
- assert(Dependent && "Unknown template name kind?");
if (NestedNameSpecifier *Qualifier = Dependent->getQualifier())
manglePrefix(Qualifier);
- mangleUnscopedTemplateName(Template, /* AdditionalAbiTags */ nullptr);
+
+ if (Clang11Compat && mangleSubstitution(Template))
+ return;
+
+ if (const IdentifierInfo *Id = Dependent->getIdentifier())
+ mangleSourceName(Id);
+ else
+ mangleOperatorName(Dependent->getOperator(), UnknownArity);
+
+ addSubstitution(Template);
}
void CXXNameMangler::mangleTemplatePrefix(GlobalDecl GD,
@@ -2185,7 +2172,12 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
}
}
- mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
+ // Note: we don't pass in the template name here. We are mangling the
+ // original source-level template arguments, so we shouldn't consider
+ // conversions to the corresponding template parameter.
+ // FIXME: Other compilers mangle partially-resolved template arguments in
+ // unresolved-qualifier-levels.
+ mangleTemplateArgs(TemplateName(), TST->getArgs(), TST->getNumArgs());
break;
}
@@ -2201,8 +2193,10 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::DependentTemplateSpecialization: {
const DependentTemplateSpecializationType *DTST =
cast<DependentTemplateSpecializationType>(Ty);
+ TemplateName Template = getASTContext().getDependentTemplateName(
+ DTST->getQualifier(), DTST->getIdentifier());
mangleSourceName(DTST->getIdentifier());
- mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
+ mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs());
break;
}
@@ -2388,16 +2382,39 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals, const DependentAddressSp
switch (AS) {
default: llvm_unreachable("Not a language specific address space");
// <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" |
- // "private"| "generic" ]
- case LangAS::opencl_global: ASString = "CLglobal"; break;
- case LangAS::opencl_local: ASString = "CLlocal"; break;
- case LangAS::opencl_constant: ASString = "CLconstant"; break;
- case LangAS::opencl_private: ASString = "CLprivate"; break;
- case LangAS::opencl_generic: ASString = "CLgeneric"; break;
+ // "private"| "generic" | "device" |
+ // "host" ]
+ case LangAS::opencl_global:
+ ASString = "CLglobal";
+ break;
+ case LangAS::opencl_global_device:
+ ASString = "CLdevice";
+ break;
+ case LangAS::opencl_global_host:
+ ASString = "CLhost";
+ break;
+ case LangAS::opencl_local:
+ ASString = "CLlocal";
+ break;
+ case LangAS::opencl_constant:
+ ASString = "CLconstant";
+ break;
+ case LangAS::opencl_private:
+ ASString = "CLprivate";
+ break;
+ case LangAS::opencl_generic:
+ ASString = "CLgeneric";
+ break;
// <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
- case LangAS::cuda_device: ASString = "CUdevice"; break;
- case LangAS::cuda_constant: ASString = "CUconstant"; break;
- case LangAS::cuda_shared: ASString = "CUshared"; break;
+ case LangAS::cuda_device:
+ ASString = "CUdevice";
+ break;
+ case LangAS::cuda_constant:
+ ASString = "CUconstant";
+ break;
+ case LangAS::cuda_shared:
+ ASString = "CUshared";
+ break;
// <ptrsize-addrspace> ::= [ "ptr32_sptr" | "ptr32_uptr" | "ptr64" ]
case LangAS::ptr32_sptr:
ASString = "ptr32_sptr";
@@ -2489,7 +2506,7 @@ void CXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
}
void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- Context.mangleObjCMethodName(MD, Out);
+ Context.mangleObjCMethodNameAsSourceName(MD, Out);
}
static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty,
@@ -2507,6 +2524,12 @@ static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty,
if (Ctx.getLangOpts().getClangABICompat() > LangOptions::ClangABI::Ver6 &&
isa<AutoType>(Ty))
return false;
+ // A placeholder type for class template deduction is substitutable with
+ // its corresponding template name; this is handled specially when mangling
+ // the type.
+ if (auto *DeducedTST = Ty->getAs<DeducedTemplateSpecializationType>())
+ if (DeducedTST->getDeducedType().isNull())
+ return false;
return true;
}
@@ -2555,6 +2578,10 @@ void CXXNameMangler::mangleType(QualType T) {
if (!TST->isTypeAlias())
break;
+ // FIXME: We presumably shouldn't strip off ElaboratedTypes with
+ // instantation-dependent qualifiers. See
+ // https://github.com/itanium-cxx-abi/cxx-abi/issues/114.
+
QualType Desugared
= T.getSingleStepDesugaredType(Context.getASTContext());
if (Desugared == T)
@@ -2835,6 +2862,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
<< type_name; \
break;
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id: \
+ type_name = #Name; \
+ Out << 'u' << type_name.size() << type_name; \
+ break;
+#include "clang/Basic/PPCTypes.def"
}
}
@@ -3248,7 +3281,7 @@ static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
case BuiltinType::Double:
return "Float64";
case BuiltinType::BFloat16:
- return "BFloat16";
+ return "Bfloat16";
default:
llvm_unreachable("Unexpected vector element base type");
}
@@ -3298,6 +3331,103 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const DependentVectorType *T) {
Diags.Report(T->getAttributeLoc(), DiagID);
}
+// The AArch64 ACLE specifies that fixed-length SVE vector and predicate types
+// defined with the 'arm_sve_vector_bits' attribute map to the same AAPCS64
+// type as the sizeless variants.
+//
+// The mangling scheme for VLS types is implemented as a "pseudo" template:
+//
+// '__SVE_VLS<<type>, <vector length>>'
+//
+// combining the existing SVE type with a specific vector length (in bits).
+// For example:
+//
+// typedef __SVInt32_t foo __attribute__((arm_sve_vector_bits(512)));
+//
+// is described as '__SVE_VLS<__SVInt32_t, 512u>' and mangled as:
+//
+// "9__SVE_VLSI" + base type mangling + "Lj" + __ARM_FEATURE_SVE_BITS + "EE"
+//
+// i.e. 9__SVE_VLSIu11__SVInt32_tLj512EE
+//
+// The latest ACLE specification (00bet5) does not contain details of this
+// mangling scheme; it will be specified in the next revision. The mangling
+// scheme is otherwise defined in the appendices to the Procedure Call Standard
+// for the Arm Architecture, see
+// https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst#appendix-c-mangling
+void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) {
+ assert((T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) &&
+ "expected fixed-length SVE vector!");
+
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() &&
+ "expected builtin type for fixed-length SVE vector!");
+
+ StringRef TypeName;
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar:
+ TypeName = "__SVInt8_t";
+ break;
+ case BuiltinType::UChar: {
+ if (T->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ TypeName = "__SVUint8_t";
+ else
+ TypeName = "__SVBool_t";
+ break;
+ }
+ case BuiltinType::Short:
+ TypeName = "__SVInt16_t";
+ break;
+ case BuiltinType::UShort:
+ TypeName = "__SVUint16_t";
+ break;
+ case BuiltinType::Int:
+ TypeName = "__SVInt32_t";
+ break;
+ case BuiltinType::UInt:
+ TypeName = "__SVUint32_t";
+ break;
+ case BuiltinType::Long:
+ TypeName = "__SVInt64_t";
+ break;
+ case BuiltinType::ULong:
+ TypeName = "__SVUint64_t";
+ break;
+ case BuiltinType::Half:
+ TypeName = "__SVFloat16_t";
+ break;
+ case BuiltinType::Float:
+ TypeName = "__SVFloat32_t";
+ break;
+ case BuiltinType::Double:
+ TypeName = "__SVFloat64_t";
+ break;
+ case BuiltinType::BFloat16:
+ TypeName = "__SVBfloat16_t";
+ break;
+ default:
+ llvm_unreachable("unexpected element type for fixed-length SVE vector!");
+ }
+
+ unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width;
+
+ if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ VecSizeInBits *= 8;
+
+ Out << "9__SVE_VLSI" << 'u' << TypeName.size() << TypeName << "Lj"
+ << VecSizeInBits << "EE";
+}
+
+void CXXNameMangler::mangleAArch64FixedSveVectorType(
+ const DependentVectorType *T) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot mangle this dependent fixed-length SVE vector type yet");
+ Diags.Report(T->getAttributeLoc(), DiagID);
+}
+
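// Illustration (assumption: AArch64 with __ARM_FEATURE_SVE_BITS == 512),
// following the pseudo-template scheme described above:
//
//   typedef __SVInt32_t vec __attribute__((arm_sve_vector_bits(512)));
//   void g(vec);   // mangles as _Z1g9__SVE_VLSIu11__SVInt32_tLj512EE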
// GNU extension: vector types
// <type> ::= <vector-type>
// <vector-type> ::= Dv <positive dimension number> _
@@ -3318,6 +3448,10 @@ void CXXNameMangler::mangleType(const VectorType *T) {
else
mangleNeonVectorType(T);
return;
+ } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
+ mangleAArch64FixedSveVectorType(T);
+ return;
}
Out << "Dv" << T->getNumElements() << '_';
if (T->getVectorKind() == VectorType::AltiVecPixel)
@@ -3340,6 +3474,10 @@ void CXXNameMangler::mangleType(const DependentVectorType *T) {
else
mangleNeonVectorType(T);
return;
+ } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
+ mangleAArch64FixedSveVectorType(T);
+ return;
}
Out << "Dv";
@@ -3364,10 +3502,13 @@ void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
}
void CXXNameMangler::mangleType(const ConstantMatrixType *T) {
- // Mangle matrix types using a vendor extended type qualifier:
- // U<Len>matrix_type<Rows><Columns><element type>
+ // Mangle matrix types as a vendor extended type:
+ // u<Len>matrix_typeI<Rows><Columns><element type>E
+
StringRef VendorQualifier = "matrix_type";
- Out << "U" << VendorQualifier.size() << VendorQualifier;
+ Out << "u" << VendorQualifier.size() << VendorQualifier;
+
+ Out << "I";
auto &ASTCtx = getASTContext();
unsigned BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
llvm::APSInt Rows(BitWidth);
@@ -3377,15 +3518,20 @@ void CXXNameMangler::mangleType(const ConstantMatrixType *T) {
Columns = T->getNumColumns();
mangleIntegerLiteral(ASTCtx.getSizeType(), Columns);
mangleType(T->getElementType());
+ Out << "E";
}
void CXXNameMangler::mangleType(const DependentSizedMatrixType *T) {
- // U<Len>matrix_type<row expr><column expr><element type>
+ // Mangle matrix types as a vendor extended type:
+ // u<Len>matrix_typeI<row expr><column expr><element type>E
StringRef VendorQualifier = "matrix_type";
- Out << "U" << VendorQualifier.size() << VendorQualifier;
- mangleTemplateArg(T->getRowExpr());
- mangleTemplateArg(T->getColumnExpr());
+ Out << "u" << VendorQualifier.size() << VendorQualifier;
+
+ Out << "I";
+ mangleTemplateArg(T->getRowExpr(), false);
+ mangleTemplateArg(T->getColumnExpr(), false);
mangleType(T->getElementType());
+ Out << "E";
}
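// Illustration (assumption: an LP64 target, where size_t mangles as 'm'):
//
//   using M = float __attribute__((matrix_type(3, 4)));
//   void h(M);   // mangles as _Z1hu11matrix_typeILm3ELm4EfE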
void CXXNameMangler::mangleType(const DependentAddressSpaceType *T) {
@@ -3456,7 +3602,7 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(T->getArgs(), T->getNumArgs());
+ mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
addSubstitution(QualType(T, 0));
}
}
@@ -3508,7 +3654,7 @@ void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(T->getArgs(), T->getNumArgs());
+ mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
Out << 'E';
}
@@ -3575,13 +3721,18 @@ void CXXNameMangler::mangleType(const AutoType *T) {
}
void CXXNameMangler::mangleType(const DeducedTemplateSpecializationType *T) {
- // FIXME: This is not the right mangling. We also need to include a scope
- // here in some cases.
- QualType D = T->getDeducedType();
- if (D.isNull())
- mangleUnscopedTemplateName(T->getTemplateName(), nullptr);
- else
- mangleType(D);
+ QualType Deduced = T->getDeducedType();
+ if (!Deduced.isNull())
+ return mangleType(Deduced);
+
+ TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl();
+ assert(TD && "shouldn't form deduced TST unless we know we have a template");
+
+ if (mangleSubstitution(TD))
+ return;
+
+ mangleName(GlobalDecl(TD));
+ addSubstitution(TD);
}
void CXXNameMangler::mangleType(const AtomicType *T) {
@@ -3603,7 +3754,7 @@ void CXXNameMangler::mangleType(const ExtIntType *T) {
llvm::APSInt BW(32, true);
BW = T->getNumBits();
TemplateArgument TA(Context.getASTContext(), BW, getASTContext().IntTy);
- mangleTemplateArgs(&TA, 1);
+ mangleTemplateArgs(TemplateName(), &TA, 1);
if (T->isUnsigned())
Out << "j";
else
@@ -3613,7 +3764,7 @@ void CXXNameMangler::mangleType(const ExtIntType *T) {
void CXXNameMangler::mangleType(const DependentExtIntType *T) {
Out << "U7_ExtInt";
TemplateArgument TA(T->getNumBitsExpr());
- mangleTemplateArgs(&TA, 1);
+ mangleTemplateArgs(TemplateName(), &TA, 1);
if (T->isUnsigned())
Out << "j";
else
@@ -3841,7 +3992,6 @@ recurse:
case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
case Expr::SourceLocExprClass:
- case Expr::FixedPointLiteralClass:
case Expr::BuiltinBitCastExprClass:
{
if (!NullOut) {
@@ -4066,7 +4216,7 @@ recurse:
case Expr::CXXUnresolvedConstructExprClass: {
const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
- unsigned N = CE->arg_size();
+ unsigned N = CE->getNumArgs();
if (CE->isListInitialization()) {
assert(N == 1 && "unexpected form for list initialization");
@@ -4415,13 +4565,14 @@ recurse:
case Expr::FloatingLiteralClass: {
const FloatingLiteral *FL = cast<FloatingLiteral>(E);
- Out << 'L';
- mangleType(FL->getType());
- mangleFloat(FL->getValue());
- Out << 'E';
+ mangleFloatLiteral(FL->getType(), FL->getValue());
break;
}
+ case Expr::FixedPointLiteralClass:
+ mangleFixedPointLiteral();
+ break;
+
case Expr::CharacterLiteralClass:
Out << 'L';
mangleType(E->getType());
@@ -4484,9 +4635,7 @@ recurse:
case Expr::GNUNullExprClass:
// Mangle as if an integer literal 0.
- Out << 'L';
- mangleType(E->getType());
- Out << "0E";
+ mangleIntegerLiteral(E->getType(), llvm::APSInt(32));
break;
case Expr::CXXNullPtrLiteralExprClass: {
@@ -4504,7 +4653,7 @@ recurse:
if (SPE->isPartiallySubstituted()) {
Out << "sP";
for (const auto &A : SPE->getPartialArguments())
- mangleTemplateArg(A);
+ mangleTemplateArg(A, false);
Out << "E";
break;
}
@@ -4692,33 +4841,112 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
}
}
-void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentLoc *TemplateArgs,
+namespace {
+// Helper to provide ancillary information on a template used to mangle its
+// arguments.
+struct TemplateArgManglingInfo {
+ TemplateDecl *ResolvedTemplate = nullptr;
+ bool SeenPackExpansionIntoNonPack = false;
+ const NamedDecl *UnresolvedExpandedPack = nullptr;
+
+ TemplateArgManglingInfo(TemplateName TN) {
+ if (TemplateDecl *TD = TN.getAsTemplateDecl())
+ ResolvedTemplate = TD;
+ }
+
+ /// Do we need to mangle template arguments with exactly correct types?
+ ///
+ /// This should be called exactly once for each parameter / argument pair, in
+ /// order.
+ bool needExactType(unsigned ParamIdx, const TemplateArgument &Arg) {
+ // We need correct types when the template-name is unresolved or when it
+    // names a template that can be overloaded.
+ if (!ResolvedTemplate || SeenPackExpansionIntoNonPack)
+ return true;
+
+ // Move to the next parameter.
+ const NamedDecl *Param = UnresolvedExpandedPack;
+ if (!Param) {
+ assert(ParamIdx < ResolvedTemplate->getTemplateParameters()->size() &&
+ "no parameter for argument");
+ Param = ResolvedTemplate->getTemplateParameters()->getParam(ParamIdx);
+
+ // If we reach an expanded parameter pack whose argument isn't in pack
+ // form, that means Sema couldn't figure out which arguments belonged to
+ // it, because it contains a pack expansion. Track the expanded pack for
+ // all further template arguments until we hit that pack expansion.
+ if (Param->isParameterPack() && Arg.getKind() != TemplateArgument::Pack) {
+ assert(getExpandedPackSize(Param) &&
+ "failed to form pack argument for parameter pack");
+ UnresolvedExpandedPack = Param;
+ }
+ }
+
+ // If we encounter a pack argument that is expanded into a non-pack
+ // parameter, we can no longer track parameter / argument correspondence,
+ // and need to use exact types from this point onwards.
+ if (Arg.isPackExpansion() &&
+ (!Param->isParameterPack() || UnresolvedExpandedPack)) {
+ SeenPackExpansionIntoNonPack = true;
+ return true;
+ }
+
+    // We need exact types for the arguments of a function template, because
+    // the template might be overloaded on the types of its template
+    // parameters. As a special case, a member function template of a
+    // generic lambda is not overloadable.
+ if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ResolvedTemplate)) {
+ auto *RD = dyn_cast<CXXRecordDecl>(FTD->getDeclContext());
+ if (!RD || !RD->isGenericLambda())
+ return true;
+ }
+
+ // Otherwise, we only need a correct type if the parameter has a deduced
+ // type.
+ //
+ // Note: for an expanded parameter pack, getType() returns the type prior
+ // to expansion. We could ask for the expanded type with getExpansionType(),
+ // but it doesn't matter because substitution and expansion don't affect
+ // whether a deduced type appears in the type.
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param);
+ return NTTP && NTTP->getType()->getContainedDeducedType();
+ }
+};
+}
+
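+// For example (illustrative, not part of this change): exact argument types
+// matter because function templates can be overloaded on the type of a
+// non-type template parameter:
+//
+//   template<int N>      void f();  // f<1>  mangles as _Z1fILi1EEvv
+//   template<unsigned N> void f();  // f<1u> mangles as _Z1fILj1EEvv
+//
+// For class templates no such overloading exists, so the parameter's type
+// can be used unless it is deduced (e.g. 'template<auto N>').
+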
+void CXXNameMangler::mangleTemplateArgs(TemplateName TN,
+ const TemplateArgumentLoc *TemplateArgs,
unsigned NumTemplateArgs) {
// <template-args> ::= I <template-arg>+ E
Out << 'I';
+ TemplateArgManglingInfo Info(TN);
for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(TemplateArgs[i].getArgument());
+ mangleTemplateArg(TemplateArgs[i].getArgument(),
+ Info.needExactType(i, TemplateArgs[i].getArgument()));
Out << 'E';
}
-void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentList &AL) {
+void CXXNameMangler::mangleTemplateArgs(TemplateName TN,
+ const TemplateArgumentList &AL) {
// <template-args> ::= I <template-arg>+ E
Out << 'I';
+ TemplateArgManglingInfo Info(TN);
for (unsigned i = 0, e = AL.size(); i != e; ++i)
- mangleTemplateArg(AL[i]);
+ mangleTemplateArg(AL[i], Info.needExactType(i, AL[i]));
Out << 'E';
}
-void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+void CXXNameMangler::mangleTemplateArgs(TemplateName TN,
+ const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs) {
// <template-args> ::= I <template-arg>+ E
Out << 'I';
+ TemplateArgManglingInfo Info(TN);
for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(TemplateArgs[i]);
+ mangleTemplateArg(TemplateArgs[i], Info.needExactType(i, TemplateArgs[i]));
Out << 'E';
}
-void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
+void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) {
// <template-arg> ::= <type> # type or template
// ::= X <expression> E # expression
// ::= <expr-primary> # simple expressions
@@ -4767,42 +4995,517 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
break;
case TemplateArgument::Declaration: {
// <expr-primary> ::= L <mangled-name> E # external name
- // Clang produces AST's where pointer-to-member-function expressions
- // and pointer-to-function expressions are represented as a declaration not
- // an expression. We compensate for it here to produce the correct mangling.
ValueDecl *D = A.getAsDecl();
- bool compensateMangling = !A.getParamTypeForDecl()->isReferenceType();
- if (compensateMangling) {
- Out << 'X';
- mangleOperatorName(OO_Amp, 1);
- }
- Out << 'L';
- // References to external entities use the mangled name; if the name would
- // not normally be mangled then mangle it as unqualified.
- mangle(D);
- Out << 'E';
-
- if (compensateMangling)
- Out << 'E';
+ // Template parameter objects are modeled by reproducing a source form
+ // produced as if by aggregate initialization.
+ if (A.getParamTypeForDecl()->isRecordType()) {
+ auto *TPO = cast<TemplateParamObjectDecl>(D);
+ mangleValueInTemplateArg(TPO->getType().getUnqualifiedType(),
+ TPO->getValue(), /*TopLevel=*/true,
+ NeedExactType);
+ break;
+ }
+ ASTContext &Ctx = Context.getASTContext();
+ APValue Value;
+ if (D->isCXXInstanceMember())
+ // Simple pointer-to-member with no conversion.
+ Value = APValue(D, /*IsDerivedMember=*/false, /*Path=*/{});
+ else if (D->getType()->isArrayType() &&
+ Ctx.hasSimilarType(Ctx.getDecayedType(D->getType()),
+ A.getParamTypeForDecl()) &&
+ Ctx.getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver11)
+ // Build a value corresponding to this implicit array-to-pointer decay.
+ Value = APValue(APValue::LValueBase(D), CharUnits::Zero(),
+ {APValue::LValuePathEntry::ArrayIndex(0)},
+ /*OnePastTheEnd=*/false);
+ else
+ // Regular pointer or reference to a declaration.
+ Value = APValue(APValue::LValueBase(D), CharUnits::Zero(),
+ ArrayRef<APValue::LValuePathEntry>(),
+ /*OnePastTheEnd=*/false);
+ mangleValueInTemplateArg(A.getParamTypeForDecl(), Value, /*TopLevel=*/true,
+ NeedExactType);
break;
}
case TemplateArgument::NullPtr: {
- // <expr-primary> ::= L <type> 0 E
- Out << 'L';
- mangleType(A.getNullPtrType());
- Out << "0E";
+ mangleNullPointer(A.getNullPtrType());
break;
}
case TemplateArgument::Pack: {
// <template-arg> ::= J <template-arg>* E
Out << 'J';
for (const auto &P : A.pack_elements())
- mangleTemplateArg(P);
+ mangleTemplateArg(P, NeedExactType);
+ Out << 'E';
+ }
+ }
+}
+
+/// Determine whether a given value is equivalent to zero-initialization for
+/// the purpose of discarding a trailing portion of a 'tl' mangling.
+///
+/// Note that this is not in general equivalent to determining whether the
+/// value has an all-zeroes bit pattern.
+static bool isZeroInitialized(QualType T, const APValue &V) {
+ // FIXME: mangleValueInTemplateArg has quadratic time complexity in
+ // pathological cases due to using this, but it's a little awkward
+ // to do this in linear time in general.
+ switch (V.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ case APValue::AddrLabelDiff:
+ return false;
+
+ case APValue::Struct: {
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ assert(RD && "unexpected type for record value");
+ unsigned I = 0;
+ for (const CXXBaseSpecifier &BS : RD->bases()) {
+ if (!isZeroInitialized(BS.getType(), V.getStructBase(I)))
+ return false;
+ ++I;
+ }
+ I = 0;
+ for (const FieldDecl *FD : RD->fields()) {
+ if (!FD->isUnnamedBitfield() &&
+ !isZeroInitialized(FD->getType(), V.getStructField(I)))
+ return false;
+ ++I;
+ }
+ return true;
+ }
+
+ case APValue::Union: {
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ assert(RD && "unexpected type for union value");
+ // Zero-initialization zeroes the first non-unnamed-bitfield field, if any.
+ for (const FieldDecl *FD : RD->fields()) {
+ if (!FD->isUnnamedBitfield())
+ return V.getUnionField() && declaresSameEntity(FD, V.getUnionField()) &&
+ isZeroInitialized(FD->getType(), V.getUnionValue());
+ }
+ // If there are no fields (other than unnamed bitfields), the value is
+ // necessarily zero-initialized.
+ return true;
+ }
+
+ case APValue::Array: {
+ QualType ElemT(T->getArrayElementTypeNoTypeQual(), 0);
+ for (unsigned I = 0, N = V.getArrayInitializedElts(); I != N; ++I)
+ if (!isZeroInitialized(ElemT, V.getArrayInitializedElt(I)))
+ return false;
+ return !V.hasArrayFiller() || isZeroInitialized(ElemT, V.getArrayFiller());
+ }
+
+ case APValue::Vector: {
+ const VectorType *VT = T->castAs<VectorType>();
+ for (unsigned I = 0, N = V.getVectorLength(); I != N; ++I)
+ if (!isZeroInitialized(VT->getElementType(), V.getVectorElt(I)))
+ return false;
+ return true;
+ }
+
+ case APValue::Int:
+ return !V.getInt();
+
+ case APValue::Float:
+ return V.getFloat().isPosZero();
+
+ case APValue::FixedPoint:
+ return !V.getFixedPoint().getValue();
+
+ case APValue::ComplexFloat:
+ return V.getComplexFloatReal().isPosZero() &&
+ V.getComplexFloatImag().isPosZero();
+
+ case APValue::ComplexInt:
+ return !V.getComplexIntReal() && !V.getComplexIntImag();
+
+ case APValue::LValue:
+ return V.isNullPointer();
+
+ case APValue::MemberPointer:
+ return !V.getMemberPointerDecl();
+ }
+
+ llvm_unreachable("Unhandled APValue::ValueKind enum");
+}
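+
+// For example (illustrative, not part of this change): given
+//
+//   struct A { int x, y; };
+//   template<A> void f();
+//
+// the value of the argument A{1, 0} should mangle as tl1ALi1EE rather than
+// tl1ALi1ELi0EE, because the trailing zero-initialized field is dropped.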
+
+static QualType getLValueType(ASTContext &Ctx, const APValue &LV) {
+ QualType T = LV.getLValueBase().getType();
+ for (APValue::LValuePathEntry E : LV.getLValuePath()) {
+ if (const ArrayType *AT = Ctx.getAsArrayType(T))
+ T = AT->getElementType();
+ else if (const FieldDecl *FD =
+ dyn_cast<FieldDecl>(E.getAsBaseOrMember().getPointer()))
+ T = FD->getType();
+ else
+ T = Ctx.getRecordType(
+ cast<CXXRecordDecl>(E.getAsBaseOrMember().getPointer()));
+ }
+ return T;
+}
+
+void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
+ bool TopLevel,
+ bool NeedExactType) {
+ // Ignore all top-level cv-qualifiers, to match GCC.
+ Qualifiers Quals;
+ T = getASTContext().getUnqualifiedArrayType(T, Quals);
+
+ // A top-level expression that's not a primary expression is wrapped in X...E.
+ bool IsPrimaryExpr = true;
+ auto NotPrimaryExpr = [&] {
+ if (TopLevel && IsPrimaryExpr)
+ Out << 'X';
+ IsPrimaryExpr = false;
+ };
+
+ // Proposed in https://github.com/itanium-cxx-abi/cxx-abi/issues/63.
+ switch (V.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ Out << 'L';
+ mangleType(T);
Out << 'E';
+ break;
+
+ case APValue::AddrLabelDiff:
+ llvm_unreachable("unexpected value kind in template argument");
+
+ case APValue::Struct: {
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ assert(RD && "unexpected type for record value");
+
+ // Drop trailing zero-initialized elements.
+ llvm::SmallVector<const FieldDecl *, 16> Fields(RD->field_begin(),
+ RD->field_end());
+ while (
+ !Fields.empty() &&
+ (Fields.back()->isUnnamedBitfield() ||
+ isZeroInitialized(Fields.back()->getType(),
+ V.getStructField(Fields.back()->getFieldIndex())))) {
+ Fields.pop_back();
+ }
+ llvm::ArrayRef<CXXBaseSpecifier> Bases(RD->bases_begin(), RD->bases_end());
+ if (Fields.empty()) {
+ while (!Bases.empty() &&
+ isZeroInitialized(Bases.back().getType(),
+ V.getStructBase(Bases.size() - 1)))
+ Bases = Bases.drop_back();
+ }
+
+ // <expression> ::= tl <type> <braced-expression>* E
+ NotPrimaryExpr();
+ Out << "tl";
+ mangleType(T);
+ for (unsigned I = 0, N = Bases.size(); I != N; ++I)
+ mangleValueInTemplateArg(Bases[I].getType(), V.getStructBase(I), false);
+ for (unsigned I = 0, N = Fields.size(); I != N; ++I) {
+ if (Fields[I]->isUnnamedBitfield())
+ continue;
+ mangleValueInTemplateArg(Fields[I]->getType(),
+ V.getStructField(Fields[I]->getFieldIndex()),
+ false);
+ }
+ Out << 'E';
+ break;
}
+
+ case APValue::Union: {
+ assert(T->getAsCXXRecordDecl() && "unexpected type for union value");
+ const FieldDecl *FD = V.getUnionField();
+
+ if (!FD) {
+ Out << 'L';
+ mangleType(T);
+ Out << 'E';
+ break;
+ }
+
+ // <braced-expression> ::= di <field source-name> <braced-expression>
+ NotPrimaryExpr();
+ Out << "tl";
+ mangleType(T);
+ if (!isZeroInitialized(T, V)) {
+ Out << "di";
+ mangleSourceName(FD->getIdentifier());
+ mangleValueInTemplateArg(FD->getType(), V.getUnionValue(), false);
+ }
+ Out << 'E';
+ break;
}
+
+ case APValue::Array: {
+ QualType ElemT(T->getArrayElementTypeNoTypeQual(), 0);
+
+ NotPrimaryExpr();
+ Out << "tl";
+ mangleType(T);
+
+ // Drop trailing zero-initialized elements.
+ unsigned N = V.getArraySize();
+ if (!V.hasArrayFiller() || isZeroInitialized(ElemT, V.getArrayFiller())) {
+ N = V.getArrayInitializedElts();
+ while (N && isZeroInitialized(ElemT, V.getArrayInitializedElt(N - 1)))
+ --N;
+ }
+
+ for (unsigned I = 0; I != N; ++I) {
+ const APValue &Elem = I < V.getArrayInitializedElts()
+ ? V.getArrayInitializedElt(I)
+ : V.getArrayFiller();
+ mangleValueInTemplateArg(ElemT, Elem, false);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case APValue::Vector: {
+ const VectorType *VT = T->castAs<VectorType>();
+
+ NotPrimaryExpr();
+ Out << "tl";
+ mangleType(T);
+ unsigned N = V.getVectorLength();
+ while (N && isZeroInitialized(VT->getElementType(), V.getVectorElt(N - 1)))
+ --N;
+ for (unsigned I = 0; I != N; ++I)
+ mangleValueInTemplateArg(VT->getElementType(), V.getVectorElt(I), false);
+ Out << 'E';
+ break;
+ }
+
+ case APValue::Int:
+ mangleIntegerLiteral(T, V.getInt());
+ break;
+
+ case APValue::Float:
+ mangleFloatLiteral(T, V.getFloat());
+ break;
+
+ case APValue::FixedPoint:
+ mangleFixedPointLiteral();
+ break;
+
+ case APValue::ComplexFloat: {
+ const ComplexType *CT = T->castAs<ComplexType>();
+ NotPrimaryExpr();
+ Out << "tl";
+ mangleType(T);
+ if (!V.getComplexFloatReal().isPosZero() ||
+ !V.getComplexFloatImag().isPosZero())
+ mangleFloatLiteral(CT->getElementType(), V.getComplexFloatReal());
+ if (!V.getComplexFloatImag().isPosZero())
+ mangleFloatLiteral(CT->getElementType(), V.getComplexFloatImag());
+ Out << 'E';
+ break;
+ }
+
+ case APValue::ComplexInt: {
+ const ComplexType *CT = T->castAs<ComplexType>();
+ NotPrimaryExpr();
+ Out << "tl";
+ mangleType(T);
+ if (V.getComplexIntReal().getBoolValue() ||
+ V.getComplexIntImag().getBoolValue())
+ mangleIntegerLiteral(CT->getElementType(), V.getComplexIntReal());
+ if (V.getComplexIntImag().getBoolValue())
+ mangleIntegerLiteral(CT->getElementType(), V.getComplexIntImag());
+ Out << 'E';
+ break;
+ }
+
+ case APValue::LValue: {
+ // Proposed in https://github.com/itanium-cxx-abi/cxx-abi/issues/47.
+ assert((T->isPointerType() || T->isReferenceType()) &&
+ "unexpected type for LValue template arg");
+
+ if (V.isNullPointer()) {
+ mangleNullPointer(T);
+ break;
+ }
+
+ APValue::LValueBase B = V.getLValueBase();
+ if (!B) {
+ // Non-standard mangling for integer cast to a pointer; this can only
+ // occur as an extension.
+ CharUnits Offset = V.getLValueOffset();
+ if (Offset.isZero()) {
+ // This is reinterpret_cast<T*>(0), not a null pointer. Mangle this as
+ // a cast, because L <type> 0 E means something else.
+ NotPrimaryExpr();
+ Out << "rc";
+ mangleType(T);
+ Out << "Li0E";
+ if (TopLevel)
+ Out << 'E';
+ } else {
+ Out << "L";
+ mangleType(T);
+ Out << Offset.getQuantity() << 'E';
+ }
+ break;
+ }
+
+ ASTContext &Ctx = Context.getASTContext();
+
+ enum { Base, Offset, Path } Kind;
+ if (!V.hasLValuePath()) {
+ // Mangle as (T*)((char*)&base + N).
+ if (T->isReferenceType()) {
+ NotPrimaryExpr();
+ Out << "decvP";
+ mangleType(T->getPointeeType());
+ } else {
+ NotPrimaryExpr();
+ Out << "cv";
+ mangleType(T);
+ }
+ Out << "plcvPcad";
+ Kind = Offset;
+ } else {
+ if (!V.getLValuePath().empty() || V.isLValueOnePastTheEnd()) {
+ NotPrimaryExpr();
+ // A final conversion to the template parameter's type is usually
+ // folded into the 'so' mangling, but we can't do that for 'void*'
+ // parameters without introducing collisions.
+ if (NeedExactType && T->isVoidPointerType()) {
+ Out << "cv";
+ mangleType(T);
+ }
+ if (T->isPointerType())
+ Out << "ad";
+ Out << "so";
+ mangleType(T->isVoidPointerType()
+ ? getLValueType(Ctx, V).getUnqualifiedType()
+ : T->getPointeeType());
+ Kind = Path;
+ } else {
+ if (NeedExactType &&
+ !Ctx.hasSameType(T->getPointeeType(), getLValueType(Ctx, V)) &&
+ Ctx.getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver11) {
+ NotPrimaryExpr();
+ Out << "cv";
+ mangleType(T);
+ }
+ if (T->isPointerType()) {
+ NotPrimaryExpr();
+ Out << "ad";
+ }
+ Kind = Base;
+ }
+ }
+
+ QualType TypeSoFar = B.getType();
+ if (auto *VD = B.dyn_cast<const ValueDecl*>()) {
+ Out << 'L';
+ mangle(VD);
+ Out << 'E';
+ } else if (auto *E = B.dyn_cast<const Expr*>()) {
+ NotPrimaryExpr();
+ mangleExpression(E);
+ } else if (auto TI = B.dyn_cast<TypeInfoLValue>()) {
+ NotPrimaryExpr();
+ Out << "ti";
+ mangleType(QualType(TI.getType(), 0));
+ } else {
+ // We should never see dynamic allocations here.
+ llvm_unreachable("unexpected lvalue base kind in template argument");
+ }
+
+ switch (Kind) {
+ case Base:
+ break;
+
+ case Offset:
+ Out << 'L';
+ mangleType(Ctx.getPointerDiffType());
+ mangleNumber(V.getLValueOffset().getQuantity());
+ Out << 'E';
+ break;
+
+ case Path:
+ // <expression> ::= so <referent type> <expr> [<offset number>]
+ // <union-selector>* [p] E
+ if (!V.getLValueOffset().isZero())
+ mangleNumber(V.getLValueOffset().getQuantity());
+
+ // We model a past-the-end array pointer as array indexing with index N,
+ // not with the "past the end" flag. Compensate for that.
+ bool OnePastTheEnd = V.isLValueOnePastTheEnd();
+
+ for (APValue::LValuePathEntry E : V.getLValuePath()) {
+ if (auto *AT = TypeSoFar->getAsArrayTypeUnsafe()) {
+ if (auto *CAT = dyn_cast<ConstantArrayType>(AT))
+ OnePastTheEnd |= CAT->getSize() == E.getAsArrayIndex();
+ TypeSoFar = AT->getElementType();
+ } else {
+ const Decl *D = E.getAsBaseOrMember().getPointer();
+ if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ // <union-selector> ::= _ <number>
+ if (FD->getParent()->isUnion()) {
+ Out << '_';
+ if (FD->getFieldIndex())
+ Out << (FD->getFieldIndex() - 1);
+ }
+ TypeSoFar = FD->getType();
+ } else {
+ TypeSoFar = Ctx.getRecordType(cast<CXXRecordDecl>(D));
+ }
+ }
+ }
+
+ if (OnePastTheEnd)
+ Out << 'p';
+ Out << 'E';
+ break;
+ }
+
+ break;
+ }
+
+ case APValue::MemberPointer:
+ // Proposed in https://github.com/itanium-cxx-abi/cxx-abi/issues/47.
+ if (!V.getMemberPointerDecl()) {
+ mangleNullPointer(T);
+ break;
+ }
+
+ ASTContext &Ctx = Context.getASTContext();
+
+ NotPrimaryExpr();
+ if (!V.getMemberPointerPath().empty()) {
+ Out << "mc";
+ mangleType(T);
+ } else if (NeedExactType &&
+ !Ctx.hasSameType(
+ T->castAs<MemberPointerType>()->getPointeeType(),
+ V.getMemberPointerDecl()->getType()) &&
+ Ctx.getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver11) {
+ Out << "cv";
+ mangleType(T);
+ }
+ Out << "adL";
+ mangle(V.getMemberPointerDecl());
+ Out << 'E';
+ if (!V.getMemberPointerPath().empty()) {
+ CharUnits Offset =
+ Context.getASTContext().getMemberPointerPathAdjustment(V);
+ if (!Offset.isZero())
+ mangleNumber(Offset.getQuantity());
+ Out << 'E';
+ }
+ break;
+ }
+
+ if (TopLevel && !IsPrimaryExpr)
+ Out << 'E';
}
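+// For example (illustrative, not part of this change): given
+//
+//   template<int *P> void f();
+//   int n;
+//
+// f<&n> mangles as _Z1fIXadL_Z1nEEEvv: '&n' is not a primary expression,
+// so the argument is wrapped in X...E, with the referenced entity mangled
+// as L_Z1nE.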
void CXXNameMangler::mangleTemplateParameter(unsigned Depth, unsigned Index) {
@@ -5121,8 +5824,8 @@ bool CXXNameMangler::shouldHaveAbiTags(ItaniumMangleContextImpl &C,
void ItaniumMangleContextImpl::mangleCXXName(GlobalDecl GD,
raw_ostream &Out) {
const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
- assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
- "Invalid mangleName() call, argument is not a variable or function!");
+ assert((isa<FunctionDecl, VarDecl, TemplateParamObjectDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
getASTContext().getSourceManager(),
@@ -5351,8 +6054,7 @@ void ItaniumMangleContextImpl::mangleLambdaSig(const CXXRecordDecl *Lambda,
Mangler.mangleLambdaSig(Lambda);
}
-ItaniumMangleContext *ItaniumMangleContext::create(ASTContext &Context,
- DiagnosticsEngine &Diags,
- bool IsUniqueNameMangler) {
- return new ItaniumMangleContextImpl(Context, Diags, IsUniqueNameMangler);
+ItaniumMangleContext *
+ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
+ return new ItaniumMangleContextImpl(Context, Diags);
}
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index 4bd00ece86ab..7b99546bbe2d 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -616,6 +616,12 @@ void JSONNodeDumper::VisitVectorType(const VectorType *VT) {
case VectorType::NeonPolyVector:
JOS.attribute("vectorKind", "neon poly");
break;
+ case VectorType::SveFixedLengthDataVector:
+ JOS.attribute("vectorKind", "fixed-length sve data vector");
+ break;
+ case VectorType::SveFixedLengthPredicateVector:
+ JOS.attribute("vectorKind", "fixed-length sve predicate vector");
+ break;
}
}
@@ -1412,7 +1418,7 @@ void JSONNodeDumper::VisitFixedPointLiteral(const FixedPointLiteral *FPL) {
JOS.attribute("value", FPL->getValueAsString(/*Radix=*/10));
}
void JSONNodeDumper::VisitFloatingLiteral(const FloatingLiteral *FL) {
- llvm::SmallVector<char, 16> Buffer;
+ llvm::SmallString<16> Buffer;
FL->getValue().toString(Buffer);
JOS.attribute("value", Buffer);
}
diff --git a/clang/lib/AST/Linkage.h b/clang/lib/AST/Linkage.h
index 5d8acf0016f4..cd50d138790a 100644
--- a/clang/lib/AST/Linkage.h
+++ b/clang/lib/AST/Linkage.h
@@ -140,6 +140,8 @@ class LinkageComputer {
LinkageInfo getLVForTemplateParameterList(const TemplateParameterList *Params,
LVComputationKind computation);
+ LinkageInfo getLVForValue(const APValue &V, LVComputationKind computation);
+
public:
LinkageInfo computeLVForDecl(const NamedDecl *D,
LVComputationKind computation,
diff --git a/clang/lib/AST/Mangle.cpp b/clang/lib/AST/Mangle.cpp
index a732325006c6..3282fcbd584f 100644
--- a/clang/lib/AST/Mangle.cpp
+++ b/clang/lib/AST/Mangle.cpp
@@ -175,7 +175,7 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
const TargetInfo &TI = Context.getTargetInfo();
if (CC == CCM_Other || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) {
if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
- mangleObjCMethodName(OMD, Out);
+ mangleObjCMethodNameAsSourceName(OMD, Out);
else
mangleCXXName(GD, Out);
return;
@@ -192,7 +192,7 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
if (!MCXX)
Out << D->getIdentifier()->getName();
else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
- mangleObjCMethodName(OMD, Out);
+ mangleObjCMethodNameAsSourceName(OMD, Out);
else
mangleCXXName(GD, Out);
@@ -275,7 +275,7 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
SmallString<64> Buffer;
llvm::raw_svector_ostream Stream(Buffer);
if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
- mangleObjCMethodName(Method, Stream);
+ mangleObjCMethodNameAsSourceName(Method, Stream);
} else {
assert((isa<NamedDecl>(DC) || isa<BlockDecl>(DC)) &&
"expected a NamedDecl or BlockDecl");
@@ -304,29 +304,70 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
mangleFunctionBlock(*this, Buffer, BD, Out);
}
-void MangleContext::mangleObjCMethodNameWithoutSize(const ObjCMethodDecl *MD,
- raw_ostream &OS) {
- const ObjCContainerDecl *CD =
- dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
- assert (CD && "Missing container decl in GetNameForMethod");
+void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
+ raw_ostream &OS,
+ bool includePrefixByte,
+ bool includeCategoryNamespace) {
+ if (getASTContext().getLangOpts().ObjCRuntime.isGNUFamily()) {
+ // This is the mangling we've always used on the GNU runtimes, but it
+ // has obvious collisions in the face of underscores within class
+ // names, category names, and selectors; maybe we should improve it.
+
+ OS << (MD->isClassMethod() ? "_c_" : "_i_")
+ << MD->getClassInterface()->getName() << '_';
+
+ if (includeCategoryNamespace) {
+ if (auto category = MD->getCategory())
+ OS << category->getName();
+ }
+ OS << '_';
+
+ auto selector = MD->getSelector();
+ for (unsigned slotIndex = 0,
+ numArgs = selector.getNumArgs(),
+ slotEnd = std::max(numArgs, 1U);
+ slotIndex != slotEnd; ++slotIndex) {
+ if (auto name = selector.getIdentifierInfoForSlot(slotIndex))
+ OS << name->getName();
+
+ // Replace all the positions that would've been ':' with '_'.
+ // That's after each slot except that a unary selector doesn't
+ // end in ':'.
+ if (numArgs)
+ OS << '_';
+ }
+
+ return;
+ }
+
+ // \01+[ContainerName(CategoryName) SelectorName]
+ if (includePrefixByte) {
+ OS << '\01';
+ }
OS << (MD->isInstanceMethod() ? '-' : '+') << '[';
- if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD)) {
+ if (const auto *CID = MD->getCategory()) {
OS << CID->getClassInterface()->getName();
- OS << '(' << *CID << ')';
- } else {
+ if (includeCategoryNamespace) {
+ OS << '(' << *CID << ')';
+ }
+ } else if (const auto *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext())) {
OS << CD->getName();
+ } else {
+ llvm_unreachable("Unexpected ObjC method decl context");
}
OS << ' ';
MD->getSelector().print(OS);
OS << ']';
}
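+// For example (illustrative, not part of this change):
+// -[NSString(Extras) repeat:times:] is mangled as
+//   _i_NSString_Extras_repeat_times_        (GNU-family runtimes)
+//   \01-[NSString(Extras) repeat:times:]    (otherwise)
+// with the \01 byte only when includePrefixByte is set, and the category
+// name only when includeCategoryNamespace is set.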
-void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
- raw_ostream &Out) {
+void MangleContext::mangleObjCMethodNameAsSourceName(const ObjCMethodDecl *MD,
+ raw_ostream &Out) {
SmallString<64> Name;
llvm::raw_svector_ostream OS(Name);
- mangleObjCMethodNameWithoutSize(MD, OS);
+ mangleObjCMethodName(MD, OS, /*includePrefixByte=*/false,
+ /*includeCategoryNamespace=*/true);
Out << OS.str().size() << OS.str();
}
@@ -352,7 +393,8 @@ public:
if (writeFuncOrVarName(VD, FrontendBufOS))
return true;
} else if (auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- MC->mangleObjCMethodNameWithoutSize(MD, OS);
+ MC->mangleObjCMethodName(MD, OS, /*includePrefixByte=*/false,
+ /*includeCategoryNamespace=*/true);
return false;
} else if (auto *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
writeObjCClassName(ID, FrontendBufOS);
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
index 529f301e4696..df6c566abc7d 100644
--- a/clang/lib/AST/MicrosoftMangle.cpp
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -50,7 +50,7 @@ struct msvc_hashing_ostream : public llvm::raw_svector_ostream {
bool StartsWithEscape = MangledName.startswith("\01");
if (StartsWithEscape)
MangledName = MangledName.drop_front(1);
- if (MangledName.size() <= 4096) {
+ if (MangledName.size() < 4096) {
OS << str();
return;
}
@@ -308,12 +308,17 @@ public:
void mangleName(const NamedDecl *ND);
void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle);
void mangleVariableEncoding(const VarDecl *VD);
- void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD);
+ void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD,
+ StringRef Prefix = "$");
void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
- const CXXMethodDecl *MD);
+ const CXXMethodDecl *MD,
+ StringRef Prefix = "$");
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
const MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
+ void mangleNumber(llvm::APSInt Number);
+ void mangleFloat(llvm::APFloat Number);
+ void mangleBits(llvm::APInt Number);
void mangleTagTypeKind(TagTypeKind TK);
void mangleArtificialTagType(TagTypeKind TK, StringRef UnqualifiedName,
ArrayRef<StringRef> NestedNames = None);
@@ -378,14 +383,18 @@ private:
void mangleFunctionClass(const FunctionDecl *FD);
void mangleCallingConvention(CallingConv CC);
void mangleCallingConvention(const FunctionType *T);
- void mangleIntegerLiteral(const llvm::APSInt &Number, bool IsBoolean);
- void mangleExpression(const Expr *E);
+ void mangleIntegerLiteral(const llvm::APSInt &Number,
+ const NonTypeTemplateParmDecl *PD = nullptr,
+ QualType TemplateArgType = QualType());
+ void mangleExpression(const Expr *E, const NonTypeTemplateParmDecl *PD);
void mangleThrowSpecification(const FunctionProtoType *T);
void mangleTemplateArgs(const TemplateDecl *TD,
const TemplateArgumentList &TemplateArgs);
void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA,
const NamedDecl *Parm);
+ void mangleTemplateArgValue(QualType T, const APValue &V,
+ bool WithScalarType = false);
void mangleObjCProtocol(const ObjCProtocolDecl *PD);
void mangleObjCLifetime(const QualType T, Qualifiers Quals,
@@ -501,7 +510,10 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
// MSVC appears to mangle GUIDs as if they were variables of type
// 'const struct __s_GUID'.
Out << "3U__s_GUID@@B";
- else
+ else if (isa<TemplateParamObjectDecl>(D)) {
+ // Template parameter objects don't get a <type-encoding>; their type is
+ // specified as part of their value.
+ } else
llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
@@ -592,7 +604,8 @@ void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
}
void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
- const ValueDecl *VD) {
+ const ValueDecl *VD,
+ StringRef Prefix) {
// <member-data-pointer> ::= <integer-literal>
// ::= $F <number> <number>
// ::= $G <number> <number> <number>
@@ -624,7 +637,7 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
case MSInheritanceModel::Unspecified: Code = 'G'; break;
}
- Out << '$' << Code;
+ Out << Prefix << Code;
mangleNumber(FieldOffset);
@@ -639,7 +652,8 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
void
MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
- const CXXMethodDecl *MD) {
+ const CXXMethodDecl *MD,
+ StringRef Prefix) {
// <member-function-pointer> ::= $1? <name>
// ::= $H? <name> <number>
// ::= $I? <name> <number> <number>
@@ -661,7 +675,7 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
uint64_t VBTableOffset = 0;
uint64_t VBPtrOffset = 0;
if (MD) {
- Out << '$' << Code << '?';
+ Out << Prefix << Code << '?';
if (MD->isVirtual()) {
MicrosoftVTableContext *VTContext =
cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
@@ -684,12 +698,12 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
} else {
// Null single inheritance member functions are encoded as a simple nullptr.
if (IM == MSInheritanceModel::Single) {
- Out << "$0A@";
+ Out << Prefix << "0A@";
return;
}
if (IM == MSInheritanceModel::Unspecified)
VBTableOffset = -1;
- Out << '$' << Code;
+ Out << Prefix << Code;
}
if (inheritanceModelHasNVOffsetField(/*IsMemberFunction=*/true, IM))
@@ -728,32 +742,63 @@ void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
}
void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ mangleNumber(llvm::APSInt(llvm::APInt(64, Number), /*IsUnsigned*/false));
+}
+
+void MicrosoftCXXNameMangler::mangleNumber(llvm::APSInt Number) {
+ // MSVC never mangles any integer wider than 64 bits. In general it appears
+ // to convert every integer to signed 64 bit before mangling (including
+ // unsigned 64 bit values). Do the same, but preserve bits beyond the bottom
+ // 64.
+ llvm::APInt Value =
+ Number.isSigned() ? Number.sextOrSelf(64) : Number.zextOrSelf(64);
+
// <non-negative integer> ::= A@ # when Number == 0
// ::= <decimal digit> # when 1 <= Number <= 10
// ::= <hex digit>+ @ # when Number >= 10
//
// <number> ::= [?] <non-negative integer>
- uint64_t Value = static_cast<uint64_t>(Number);
- if (Number < 0) {
+ if (Value.isNegative()) {
Value = -Value;
Out << '?';
}
+ mangleBits(Value);
+}
+
+void MicrosoftCXXNameMangler::mangleFloat(llvm::APFloat Number) {
+ using llvm::APFloat;
+
+ switch (APFloat::SemanticsToEnum(Number.getSemantics())) {
+ case APFloat::S_IEEEsingle: Out << 'A'; break;
+ case APFloat::S_IEEEdouble: Out << 'B'; break;
+ // The following are all Clang extensions. We try to pick manglings that are
+ // unlikely to conflict with MSVC's scheme.
+ case APFloat::S_IEEEhalf: Out << 'V'; break;
+ case APFloat::S_BFloat: Out << 'W'; break;
+ case APFloat::S_x87DoubleExtended: Out << 'X'; break;
+ case APFloat::S_IEEEquad: Out << 'Y'; break;
+ case APFloat::S_PPCDoubleDouble: Out << 'Z'; break;
+ }
+
+ mangleBits(Number.bitcastToAPInt());
+}
+
+void MicrosoftCXXNameMangler::mangleBits(llvm::APInt Value) {
if (Value == 0)
Out << "A@";
- else if (Value >= 1 && Value <= 10)
+ else if (Value.uge(1) && Value.ule(10))
Out << (Value - 1);
else {
// Numbers that are not encoded as decimal digits are represented as nibbles
// in the range of ASCII characters 'A' to 'P'.
// The number 0x123450 would be encoded as 'BCDEFA'
- char EncodedNumberBuffer[sizeof(uint64_t) * 2];
- MutableArrayRef<char> BufferRef(EncodedNumberBuffer);
- MutableArrayRef<char>::reverse_iterator I = BufferRef.rbegin();
- for (; Value != 0; Value >>= 4)
- *I++ = 'A' + (Value & 0xf);
- Out.write(I.base(), I - BufferRef.rbegin());
+ llvm::SmallString<32> EncodedNumberBuffer;
+ for (; Value != 0; Value.lshrInPlace(4))
+ EncodedNumberBuffer.push_back('A' + (Value & 0xf).getZExtValue());
+ std::reverse(EncodedNumberBuffer.begin(), EncodedNumberBuffer.end());
+ Out.write(EncodedNumberBuffer.data(), EncodedNumberBuffer.size());
Out << '@';
}
}
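+// For example (illustrative, not part of this change), the encoding above
+// maps:
+//   0  -> A@
+//   1  -> 0      (decimal digits encode Value - 1)
+//   10 -> 9
+//   -3 -> ?2
+//   20 -> BE@    (0x14, one nibble per letter in 'A'..'P')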
@@ -907,6 +952,13 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
}
+ if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
+ Out << "?__N";
+ mangleTemplateArgValue(TPO->getType().getUnqualifiedType(),
+ TPO->getValue());
+ break;
+ }
+
// We must have an anonymous struct.
const TagDecl *TD = cast<TagDecl>(ND);
if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
@@ -1321,7 +1373,7 @@ void MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) {
}
void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- Context.mangleObjCMethodName(MD, Out);
+ Context.mangleObjCMethodNameAsSourceName(MD, Out);
}
void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
@@ -1357,24 +1409,33 @@ MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) {
mangleUnqualifiedName(TD);
}
-void MicrosoftCXXNameMangler::mangleIntegerLiteral(const llvm::APSInt &Value,
- bool IsBoolean) {
+void MicrosoftCXXNameMangler::mangleIntegerLiteral(
+ const llvm::APSInt &Value, const NonTypeTemplateParmDecl *PD,
+ QualType TemplateArgType) {
// <integer-literal> ::= $0 <number>
- Out << "$0";
- // Make sure booleans are encoded as 0/1.
- if (IsBoolean && Value.getBoolValue())
- mangleNumber(1);
- else if (Value.isSigned())
- mangleNumber(Value.getSExtValue());
- else
- mangleNumber(Value.getZExtValue());
+ Out << "$";
+
+  // Since MSVC 2019, add 'M[<type>]' after '$' for an 'auto' template
+  // parameter whose argument is an integer.
+ if (getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019) &&
+ PD && PD->getType()->getTypeClass() == Type::Auto &&
+ !TemplateArgType.isNull()) {
+ Out << "M";
+ mangleType(TemplateArgType, SourceRange(), QMM_Drop);
+ }
+
+ Out << "0";
+
+ mangleNumber(Value);
}
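+// For example (untested sketch, not part of this change): given
+//   template<auto N> struct X {};
+// X<5> under MSVC 2019 compatibility should mangle the argument as $MH04
+// ('M', the deduced type H for int, then '0' and the encoded value 4 for
+// the value 5), whereas earlier MSVC modes emit just $04.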
-void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
+void MicrosoftCXXNameMangler::mangleExpression(
+ const Expr *E, const NonTypeTemplateParmDecl *PD) {
// See if this is a constant expression.
- llvm::APSInt Value;
- if (E->isIntegerConstantExpr(Value, Context.getASTContext())) {
- mangleIntegerLiteral(Value, E->getType()->isBooleanType());
+ if (Optional<llvm::APSInt> Value =
+ E->getIntegerConstantExpr(Context.getASTContext())) {
+ mangleIntegerLiteral(*Value, PD, E->getType());
return;
}
@@ -1412,10 +1473,34 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
// ::= <integer-literal>
// ::= <member-data-pointer>
// ::= <member-function-pointer>
- // ::= $E? <name> <type-encoding>
- // ::= $1? <name> <type-encoding>
- // ::= $0A@
+ // ::= $ <constant-value>
// ::= <template-args>
+ //
+ // <constant-value> ::= 0 <number> # integer
+ // ::= 1 <mangled-name> # address of D
+ // ::= 2 <type> <typed-constant-value>* @ # struct
+ // ::= 3 <type> <constant-value>* @ # array
+ // ::= 4 ??? # string
+ // ::= 5 <constant-value> @ # address of subobject
+ // ::= 6 <constant-value> <unqualified-name> @ # a.b
+ // ::= 7 <type> [<unqualified-name> <constant-value>] @
+ // # union, with or without an active member
+  //              ::= 8 <class> <unqualified-name> @
+  //                  # pointer to member, symbolically
+  //              ::= A <type> <non-negative integer> # float
+  //              ::= B <type> <non-negative integer> # double
+  //              ::= E <mangled-name> # reference to D
+  //              ::= F <number> <number>
+  //              ::= G <number> <number> <number>
+  //              ::= H <mangled-name> <number>
+  //              ::= I <mangled-name> <number> <number>
+  //              ::= J <mangled-name> <number> <number> <number>
+  //                  # F-J: pointer to member, by component value
+ //
+ // <typed-constant-value> ::= [<type>] <constant-value>
+ //
+ // The <type> appears to be included in a <typed-constant-value> only in the
+ // '0', '1', '8', 'A', 'B', and 'E' cases.
switch (TA.getKind()) {
case TemplateArgument::Null:
@@ -1443,15 +1528,22 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
mangleName(FD);
mangleFunctionEncoding(FD, /*ShouldMangle=*/true);
}
+ } else if (TA.getParamTypeForDecl()->isRecordType()) {
+ Out << "$";
+ auto *TPO = cast<TemplateParamObjectDecl>(ND);
+ mangleTemplateArgValue(TPO->getType().getUnqualifiedType(),
+ TPO->getValue());
} else {
mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?");
}
break;
}
- case TemplateArgument::Integral:
+ case TemplateArgument::Integral: {
+ QualType T = TA.getIntegralType();
mangleIntegerLiteral(TA.getAsIntegral(),
- TA.getIntegralType()->isBooleanType());
+ cast<NonTypeTemplateParmDecl>(Parm), T);
break;
+ }
case TemplateArgument::NullPtr: {
QualType T = TA.getNullPtrType();
if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
@@ -1473,16 +1565,18 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
// However, we are free to use 0 *if* we would use multiple fields for
// non-nullptr member pointers.
if (!RD->nullFieldOffsetIsZero()) {
- mangleIntegerLiteral(llvm::APSInt::get(-1), /*IsBoolean=*/false);
+ mangleIntegerLiteral(llvm::APSInt::get(-1),
+ cast<NonTypeTemplateParmDecl>(Parm), T);
return;
}
}
}
- mangleIntegerLiteral(llvm::APSInt::getUnsigned(0), /*IsBoolean=*/false);
+ mangleIntegerLiteral(llvm::APSInt::getUnsigned(0),
+ cast<NonTypeTemplateParmDecl>(Parm), T);
break;
}
case TemplateArgument::Expression:
- mangleExpression(TA.getAsExpr());
+ mangleExpression(TA.getAsExpr(), cast<NonTypeTemplateParmDecl>(Parm));
break;
case TemplateArgument::Pack: {
ArrayRef<TemplateArgument> TemplateArgs = TA.getPackAsArray();
@@ -1521,6 +1615,212 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
}
}
+void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
+ const APValue &V,
+ bool WithScalarType) {
+ switch (V.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ // FIXME: MSVC doesn't allow this, so we can't be sure how it should be
+ // mangled.
+ if (WithScalarType)
+ mangleType(T, SourceRange(), QMM_Escape);
+ Out << '@';
+ return;
+
+ case APValue::Int:
+ if (WithScalarType)
+ mangleType(T, SourceRange(), QMM_Escape);
+ Out << '0';
+ mangleNumber(V.getInt());
+ return;
+
+ case APValue::Float:
+ if (WithScalarType)
+ mangleType(T, SourceRange(), QMM_Escape);
+ mangleFloat(V.getFloat());
+ return;
+
+ case APValue::LValue: {
+ if (WithScalarType)
+ mangleType(T, SourceRange(), QMM_Escape);
+
+ // We don't know how to mangle past-the-end pointers yet.
+ if (V.isLValueOnePastTheEnd())
+ break;
+
+ APValue::LValueBase Base = V.getLValueBase();
+ if (!V.hasLValuePath() || V.getLValuePath().empty()) {
+ // Taking the address of a complete object has a special-case mangling.
+ if (Base.isNull()) {
+ // MSVC emits 0A@ for null pointers. Generalize this for arbitrary
+ // integers cast to pointers.
+ // FIXME: This mangles 0 cast to a pointer the same as a null pointer,
+ // even in cases where the two are different values.
+ Out << "0";
+ mangleNumber(V.getLValueOffset().getQuantity());
+ } else if (!V.hasLValuePath()) {
+ // FIXME: This can only happen as an extension. Invent a mangling.
+ break;
+ } else if (auto *VD = Base.dyn_cast<const ValueDecl*>()) {
+ Out << (T->isReferenceType() ? "E" : "1");
+ mangle(VD);
+ } else {
+ break;
+ }
+ } else {
+ unsigned NumAts = 0;
+ if (T->isPointerType()) {
+ Out << "5";
+ ++NumAts;
+ }
+
+ QualType T = Base.getType();
+ for (APValue::LValuePathEntry E : V.getLValuePath()) {
+ // We don't know how to mangle array subscripting yet.
+ if (T->isArrayType())
+ goto mangling_unknown;
+
+ const Decl *D = E.getAsBaseOrMember().getPointer();
+ auto *FD = dyn_cast<FieldDecl>(D);
+ // We don't know how to mangle derived-to-base conversions yet.
+ if (!FD)
+ goto mangling_unknown;
+
+ Out << "6";
+ ++NumAts;
+ T = FD->getType();
+ }
+
+ auto *VD = Base.dyn_cast<const ValueDecl*>();
+ if (!VD)
+ break;
+ Out << "E";
+ mangle(VD);
+
+ for (APValue::LValuePathEntry E : V.getLValuePath()) {
+ const Decl *D = E.getAsBaseOrMember().getPointer();
+ mangleUnqualifiedName(cast<FieldDecl>(D));
+ }
+ for (unsigned I = 0; I != NumAts; ++I)
+ Out << '@';
+ }
+
+ return;
+ }
+
+ case APValue::MemberPointer: {
+ if (WithScalarType)
+ mangleType(T, SourceRange(), QMM_Escape);
+
+ // FIXME: The below manglings don't include a conversion, so bail if there
+ // would be one. MSVC mangles the (possibly converted) value of the
+ // pointer-to-member object as if it were a struct, leading to collisions
+ // in some cases.
+ if (!V.getMemberPointerPath().empty())
+ break;
+
+ const CXXRecordDecl *RD =
+ T->castAs<MemberPointerType>()->getMostRecentCXXRecordDecl();
+ const ValueDecl *D = V.getMemberPointerDecl();
+ if (T->isMemberDataPointerType())
+ mangleMemberDataPointer(RD, D, "");
+ else
+ mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), "");
+ return;
+ }
+
+ case APValue::Struct: {
+ Out << '2';
+ mangleType(T, SourceRange(), QMM_Escape);
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ assert(RD && "unexpected type for record value");
+
+ unsigned BaseIndex = 0;
+ for (const CXXBaseSpecifier &B : RD->bases())
+ mangleTemplateArgValue(B.getType(), V.getStructBase(BaseIndex++));
+ for (const FieldDecl *FD : RD->fields())
+ if (!FD->isUnnamedBitfield())
+ mangleTemplateArgValue(FD->getType(),
+ V.getStructField(FD->getFieldIndex()),
+ /*WithScalarType*/ true);
+ Out << '@';
+ return;
+ }
+
+ case APValue::Union:
+ Out << '7';
+ mangleType(T, SourceRange(), QMM_Escape);
+ if (const FieldDecl *FD = V.getUnionField()) {
+ mangleUnqualifiedName(FD);
+ mangleTemplateArgValue(FD->getType(), V.getUnionValue());
+ }
+ Out << '@';
+ return;
+
+ case APValue::ComplexInt:
+ // We mangle complex types as structs, so mangle the value as a struct too.
+ Out << '2';
+ mangleType(T, SourceRange(), QMM_Escape);
+ Out << '0';
+ mangleNumber(V.getComplexIntReal());
+ Out << '0';
+ mangleNumber(V.getComplexIntImag());
+ Out << '@';
+ return;
+
+ case APValue::ComplexFloat:
+ Out << '2';
+ mangleType(T, SourceRange(), QMM_Escape);
+ mangleFloat(V.getComplexFloatReal());
+ mangleFloat(V.getComplexFloatImag());
+ Out << '@';
+ return;
+
+ case APValue::Array: {
+ Out << '3';
+ QualType ElemT = getASTContext().getAsArrayType(T)->getElementType();
+ mangleType(ElemT, SourceRange(), QMM_Escape);
+ for (unsigned I = 0, N = V.getArraySize(); I != N; ++I) {
+ const APValue &ElemV = I < V.getArrayInitializedElts()
+ ? V.getArrayInitializedElt(I)
+ : V.getArrayFiller();
+ mangleTemplateArgValue(ElemT, ElemV);
+ Out << '@';
+ }
+ Out << '@';
+ return;
+ }
+
+ case APValue::Vector: {
+ // __m128 is mangled as a struct containing an array. We follow this
+ // approach for all vector types.
+ Out << '2';
+ mangleType(T, SourceRange(), QMM_Escape);
+ Out << '3';
+ QualType ElemT = T->castAs<VectorType>()->getElementType();
+ mangleType(ElemT, SourceRange(), QMM_Escape);
+ for (unsigned I = 0, N = V.getVectorLength(); I != N; ++I) {
+ const APValue &ElemV = V.getVectorElt(I);
+ mangleTemplateArgValue(ElemT, ElemV);
+ Out << '@';
+ }
+ Out << "@@";
+ return;
+ }
+
+ case APValue::AddrLabelDiff:
+ case APValue::FixedPoint:
+ break;
+ }
+
+mangling_unknown:
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this template argument yet");
+ Diags.Report(DiagID);
+}
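+
+// For example (untested sketch derived from the grammar above, not part of
+// this change): given
+//   struct A { int x, y; };
+//   template<A> void f();
+// f<A{1, 2}> should encode the argument value as 2UA@@H00H01@ : '2' for a
+// struct, the type UA@@, then each field as <type> '0' <number>.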
+
void MicrosoftCXXNameMangler::mangleObjCProtocol(const ObjCProtocolDecl *PD) {
llvm::SmallString<64> TemplateMangling;
llvm::raw_svector_ostream Stream(TemplateMangling);
@@ -1798,7 +2098,7 @@ void MicrosoftCXXNameMangler::mangleAddressSpaceType(QualType T,
// where:
// <language_addr_space> ::= <OpenCL-addrspace> | <CUDA-addrspace>
// <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" |
- // "private"| "generic" ]
+ // "private"| "generic" | "device" | "host" ]
// <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
// Note that the above were chosen to match the Itanium mangling for this.
//
@@ -1814,8 +2114,7 @@ void MicrosoftCXXNameMangler::mangleAddressSpaceType(QualType T,
if (Context.getASTContext().addressSpaceMapManglingFor(AS)) {
unsigned TargetAS = Context.getASTContext().getTargetAddressSpace(AS);
Extra.mangleSourceName("_AS");
- Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(TargetAS),
- /*IsBoolean*/ false);
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(TargetAS));
} else {
switch (AS) {
default:
@@ -1823,6 +2122,12 @@ void MicrosoftCXXNameMangler::mangleAddressSpaceType(QualType T,
case LangAS::opencl_global:
Extra.mangleSourceName("_ASCLglobal");
break;
+ case LangAS::opencl_global_device:
+ Extra.mangleSourceName("_ASCLdevice");
+ break;
+ case LangAS::opencl_global_host:
+ Extra.mangleSourceName("_ASCLhost");
+ break;
case LangAS::opencl_local:
Extra.mangleSourceName("_ASCLlocal");
break;
@@ -2090,6 +2395,9 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -2229,10 +2537,20 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
return;
}
Out << '@';
+ } else if (IsInLambda && D && isa<CXXConversionDecl>(D)) {
+ // The only lambda conversion operators are to function pointers, which
+ // can differ by their calling convention and are typically deduced. So
+ // we make sure that this type gets mangled properly.
+ mangleType(T->getReturnType(), Range, QMM_Result);
} else {
QualType ResultType = T->getReturnType();
- if (const auto *AT =
- dyn_cast_or_null<AutoType>(ResultType->getContainedAutoType())) {
+ if (IsInLambda && isa<CXXConversionDecl>(D)) {
+ // The only lambda conversion operators are to function pointers, which
+ // can differ by their calling convention and are typically deduced. So
+ // we make sure that this type gets mangled properly.
+ mangleType(ResultType, Range, QMM_Result);
+ } else if (const auto *AT = dyn_cast_or_null<AutoType>(
+ ResultType->getContainedAutoType())) {
Out << '?';
mangleQualifiers(ResultType.getLocalQualifiers(), /*IsMember=*/false);
Out << '?';
@@ -2701,8 +3019,7 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
Stream << "?$";
Extra.mangleSourceName("__vector");
Extra.mangleType(QualType(ET, 0), Range, QMM_Escape);
- Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements()),
- /*IsBoolean=*/false);
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements()));
mangleArtificialTagType(TTK_Union, TemplateMangling, {"__clang"});
}
@@ -2941,7 +3258,7 @@ void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers,
Stream << "?$";
Extra.mangleSourceName("ocl_pipe");
Extra.mangleType(ElementType, Range, QMM_Escape);
- Extra.mangleIntegerLiteral(llvm::APSInt::get(T->isReadOnly()), true);
+ Extra.mangleIntegerLiteral(llvm::APSInt::get(T->isReadOnly()));
mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
@@ -2981,8 +3298,7 @@ void MicrosoftCXXNameMangler::mangleType(const ExtIntType *T, Qualifiers,
Extra.mangleSourceName("_UExtInt");
else
Extra.mangleSourceName("_ExtInt");
- Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumBits()),
- /*IsBoolean=*/false);
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumBits()));
mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
diff --git a/clang/lib/AST/NSAPI.cpp b/clang/lib/AST/NSAPI.cpp
index ace7f1ceebe7..cf4b42d25148 100644
--- a/clang/lib/AST/NSAPI.cpp
+++ b/clang/lib/AST/NSAPI.cpp
@@ -474,6 +474,9 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::BoundMember:
case BuiltinType::Dependent:
case BuiltinType::Overload:
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index a0b0dca55390..cab5db6244b6 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
@@ -31,28 +32,25 @@ OMPClause::child_range OMPClause::children() {
switch (getClauseKind()) {
default:
break;
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) \
case Enum: \
return static_cast<Class *>(this)->children();
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMP.inc"
}
llvm_unreachable("unknown OMPClause");
}
OMPClause::child_range OMPClause::used_children() {
switch (getClauseKind()) {
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) \
case Enum: \
return static_cast<Class *>(this)->used_children();
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
- case OMPC_threadprivate:
- case OMPC_uniform:
- case OMPC_device_type:
- case OMPC_match:
- case OMPC_unknown:
- break;
- default:
+#define CLAUSE_NO_CLASS(Enum, Str) \
+ case Enum: \
break;
+#include "llvm/Frontend/OpenMP/OMP.inc"
}
llvm_unreachable("unknown OMPClause");
}
@@ -1097,6 +1095,8 @@ OMPToClause *OMPToClause::Create(
const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ ArrayRef<SourceLocation> MotionModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId) {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Vars.size();
@@ -1121,7 +1121,8 @@ OMPToClause *OMPToClause::Create(
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
- auto *Clause = new (Mem) OMPToClause(UDMQualifierLoc, MapperId, Locs, Sizes);
+ auto *Clause = new (Mem) OMPToClause(MotionModifiers, MotionModifiersLoc,
+ UDMQualifierLoc, MapperId, Locs, Sizes);
Clause->setVarRefs(Vars);
Clause->setUDMapperRefs(UDMapperRefs);
@@ -1144,6 +1145,8 @@ OMPFromClause *OMPFromClause::Create(
const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ ArrayRef<SourceLocation> MotionModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId) {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Vars.size();
@@ -1169,7 +1172,8 @@ OMPFromClause *OMPFromClause::Create(
Sizes.NumComponents));
auto *Clause =
- new (Mem) OMPFromClause(UDMQualifierLoc, MapperId, Locs, Sizes);
+ new (Mem) OMPFromClause(MotionModifiers, MotionModifiersLoc,
+ UDMQualifierLoc, MapperId, Locs, Sizes);
Clause->setVarRefs(Vars);
Clause->setUDMapperRefs(UDMapperRefs);
@@ -1936,6 +1940,17 @@ void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
OS << ")";
}
+template <typename T>
+static void PrintMapper(raw_ostream &OS, T *Node,
+ const PrintingPolicy &Policy) {
+ OS << '(';
+ NestedNameSpecifier *MapperNNS =
+ Node->getMapperQualifierLoc().getNestedNameSpecifier();
+ if (MapperNNS)
+ MapperNNS->print(OS, Policy);
+ OS << Node->getMapperIdInfo() << ')';
+}
+
void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
if (!Node->varlist_empty()) {
OS << "map(";
@@ -1944,14 +1959,8 @@ void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
if (Node->getMapTypeModifier(I) != OMPC_MAP_MODIFIER_unknown) {
OS << getOpenMPSimpleClauseTypeName(OMPC_map,
Node->getMapTypeModifier(I));
- if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_mapper) {
- OS << '(';
- NestedNameSpecifier *MapperNNS =
- Node->getMapperQualifierLoc().getNestedNameSpecifier();
- if (MapperNNS)
- MapperNNS->print(OS, Policy);
- OS << Node->getMapperIdInfo() << ')';
- }
+ if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_mapper)
+ PrintMapper(OS, Node, Policy);
OS << ',';
}
}
@@ -1963,44 +1972,41 @@ void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
}
}
-void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "to";
- DeclarationNameInfo MapperId = Node->getMapperIdInfo();
- if (MapperId.getName() && !MapperId.getName().isEmpty()) {
- OS << '(';
- OS << "mapper(";
- NestedNameSpecifier *MapperNNS =
- Node->getMapperQualifierLoc().getNestedNameSpecifier();
- if (MapperNNS)
- MapperNNS->print(OS, Policy);
- OS << MapperId << "):";
- VisitOMPClauseList(Node, ' ');
- } else {
- VisitOMPClauseList(Node, '(');
+template <typename T> void OMPClausePrinter::VisitOMPMotionClause(T *Node) {
+ if (Node->varlist_empty())
+ return;
+ OS << getOpenMPClauseName(Node->getClauseKind());
+ unsigned ModifierCount = 0;
+ for (unsigned I = 0; I < NumberOfOMPMotionModifiers; ++I) {
+ if (Node->getMotionModifier(I) != OMPC_MOTION_MODIFIER_unknown)
+ ++ModifierCount;
+ }
+ if (ModifierCount) {
+ OS << '(';
+ for (unsigned I = 0; I < NumberOfOMPMotionModifiers; ++I) {
+ if (Node->getMotionModifier(I) != OMPC_MOTION_MODIFIER_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getMotionModifier(I));
+ if (Node->getMotionModifier(I) == OMPC_MOTION_MODIFIER_mapper)
+ PrintMapper(OS, Node, Policy);
+ if (I < ModifierCount - 1)
+ OS << ", ";
+ }
}
- OS << ")";
+ OS << ':';
+ VisitOMPClauseList(Node, ' ');
+ } else {
+ VisitOMPClauseList(Node, '(');
}
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
+ VisitOMPMotionClause(Node);
}
void OMPClausePrinter::VisitOMPFromClause(OMPFromClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "from";
- DeclarationNameInfo MapperId = Node->getMapperIdInfo();
- if (MapperId.getName() && !MapperId.getName().isEmpty()) {
- OS << '(';
- OS << "mapper(";
- NestedNameSpecifier *MapperNNS =
- Node->getMapperQualifierLoc().getNestedNameSpecifier();
- if (MapperNNS)
- MapperNNS->print(OS, Policy);
- OS << MapperId << "):";
- VisitOMPClauseList(Node, ' ');
- } else {
- VisitOMPClauseList(Node, '(');
- }
- OS << ")";
- }
+ VisitOMPMotionClause(Node);
}
void OMPClausePrinter::VisitOMPDistScheduleClause(OMPDistScheduleClause *Node) {
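The shared VisitOMPMotionClause above emits the same surface syntax for both printers it replaces. A minimal sketch of the source it corresponds to and the strings it prints ('mymap', 'S', and 'p' are made-up names):

    struct S { int len; double *buf; };
    #pragma omp declare mapper(mymap : struct S s) map(s.len, s.buf[0:s.len])

    void update(struct S *p) {
      // With motion modifiers, printed as: to(present, mapper(mymap): p[0:1])
      #pragma omp target update to(present, mapper(mymap): p[0:1])
      // Without modifiers, printed as: from(p[0:1])
      #pragma omp target update from(p[0:1])
    }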
@@ -2127,27 +2133,29 @@ void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
TraitProperty::user_condition_unknown &&
"Ill-formed user condition, expected unknown trait property!");
- llvm::APSInt CondVal;
- if (Selector.ScoreOrCondition->isIntegerConstantExpr(CondVal, ASTCtx))
- VMI.addTrait(CondVal.isNullValue()
- ? TraitProperty::user_condition_false
- : TraitProperty::user_condition_true);
+ if (Optional<APSInt> CondVal =
+ Selector.ScoreOrCondition->getIntegerConstantExpr(ASTCtx))
+ VMI.addTrait(CondVal->isNullValue()
+ ? TraitProperty::user_condition_false
+ : TraitProperty::user_condition_true,
+ "<condition>");
else
- VMI.addTrait(TraitProperty::user_condition_false);
+ VMI.addTrait(TraitProperty::user_condition_false, "<condition>");
continue;
}
- llvm::APSInt Score;
+ Optional<llvm::APSInt> Score;
llvm::APInt *ScorePtr = nullptr;
if (Selector.ScoreOrCondition) {
- if (Selector.ScoreOrCondition->isIntegerConstantExpr(Score, ASTCtx))
- ScorePtr = &Score;
+ if ((Score = Selector.ScoreOrCondition->getIntegerConstantExpr(ASTCtx)))
+ ScorePtr = &*Score;
else
- VMI.addTrait(TraitProperty::user_condition_false);
+ VMI.addTrait(TraitProperty::user_condition_false,
+ "<non-constant-score>");
}
for (const OMPTraitProperty &Property : Selector.Properties)
- VMI.addTrait(Set.Kind, Property.Kind, ScorePtr);
+ VMI.addTrait(Set.Kind, Property.Kind, Property.RawString, ScorePtr);
if (Set.Kind != TraitSet::construct)
continue;
@@ -2190,7 +2198,10 @@ void OMPTraitInfo::print(llvm::raw_ostream &OS,
OS << "(";
if (Selector.Kind == TraitSelector::user_condition) {
- Selector.ScoreOrCondition->printPretty(OS, nullptr, Policy);
+ if (Selector.ScoreOrCondition)
+ Selector.ScoreOrCondition->printPretty(OS, nullptr, Policy);
+ else
+ OS << "...";
} else {
if (Selector.ScoreOrCondition) {
@@ -2204,7 +2215,8 @@ void OMPTraitInfo::print(llvm::raw_ostream &OS,
if (!FirstProperty)
OS << ", ";
FirstProperty = false;
- OS << getOpenMPContextTraitPropertyName(Property.Kind);
+ OS << getOpenMPContextTraitPropertyName(Property.Kind,
+ Property.RawString);
}
}
OS << ")";
@@ -2231,7 +2243,9 @@ std::string OMPTraitInfo::getMangledName() const {
continue;
for (const OMPTraitProperty &Property : Selector.Properties)
- OS << '$' << 'P' << getOpenMPContextTraitPropertyName(Property.Kind);
+ OS << '$' << 'P'
+ << getOpenMPContextTraitPropertyName(Property.Kind,
+ Property.RawString);
}
}
return OS.str();
@@ -2261,9 +2275,10 @@ OMPTraitInfo::OMPTraitInfo(StringRef MangledName) {
Selector.Properties.push_back(OMPTraitProperty());
OMPTraitProperty &Property = Selector.Properties.back();
std::pair<StringRef, StringRef> PropRestPair = MangledName.split('$');
- Property.Kind =
- getOpenMPContextTraitPropertyKind(Set.Kind, PropRestPair.first);
- MangledName = PropRestPair.second;
+ Property.RawString = PropRestPair.first;
+ Property.Kind = getOpenMPContextTraitPropertyKind(
+ Set.Kind, Selector.Kind, PropRestPair.first);
+ MangledName = MangledName.drop_front(PropRestPair.first.size());
} while (true);
} while (true);
} while (true);
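For orientation, a hedged sketch of the mangling this loop parses; only the '$P' property prefix is visible in this hunk, and the '$S'/'$s' set and selector prefixes are inferred from the surrounding parsing code:

    // A context selector such as  device = {kind(cpu)}  would mangle
    // roughly as "$Sdevice$skind$Pcpu" and round-trip through the
    // StringRef constructor:
    OMPTraitInfo TI("$Sdevice$skind$Pcpu");
    // Property.RawString keeps "cpu" even when the property kind is not
    // recognized, which is what the RawString plumbing added above is for.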
@@ -2280,3 +2295,24 @@ llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
const OMPTraitInfo *TI) {
return TI ? OS << *TI : OS;
}
+
+TargetOMPContext::TargetOMPContext(
+ ASTContext &ASTCtx, std::function<void(StringRef)> &&DiagUnknownTrait,
+ const FunctionDecl *CurrentFunctionDecl)
+ : OMPContext(ASTCtx.getLangOpts().OpenMPIsDevice,
+ ASTCtx.getTargetInfo().getTriple()),
+ FeatureValidityCheck([&](StringRef FeatureName) {
+ return ASTCtx.getTargetInfo().isValidFeatureName(FeatureName);
+ }),
+ DiagUnknownTrait(std::move(DiagUnknownTrait)) {
+ ASTCtx.getFunctionFeatureMap(FeatureMap, CurrentFunctionDecl);
+}
+
+bool TargetOMPContext::matchesISATrait(StringRef RawString) const {
+ auto It = FeatureMap.find(RawString);
+ if (It != FeatureMap.end())
+ return It->second;
+ if (!FeatureValidityCheck(RawString))
+ DiagUnknownTrait(RawString);
+ return false;
+}
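matchesISATrait is what ultimately services isa traits in declare variant selectors. A minimal usage sketch ('avx512f' is only an example feature name):

    int dot_avx512(void);
    #pragma omp declare variant(dot_avx512) match(device = {isa("avx512f")})
    int dot(void);
    // Selecting a variant of dot() checks isa("avx512f") against the
    // caller's feature map; names the target does not recognize at all are
    // routed to DiagUnknownTrait.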
diff --git a/clang/lib/AST/ParentMapContext.cpp b/clang/lib/AST/ParentMapContext.cpp
index b73b32774b53..cb4995312efa 100644
--- a/clang/lib/AST/ParentMapContext.cpp
+++ b/clang/lib/AST/ParentMapContext.cpp
@@ -36,8 +36,6 @@ Expr *ParentMapContext::traverseIgnored(Expr *E) const {
switch (Traversal) {
case TK_AsIs:
return E;
- case TK_IgnoreImplicitCastsAndParentheses:
- return E->IgnoreParenImpCasts();
case TK_IgnoreUnlessSpelledInSource:
return E->IgnoreUnlessSpelledInSource();
}
@@ -154,8 +152,13 @@ public:
auto SR = Child->getSourceRange();
+ if (const auto *C = dyn_cast<CXXFunctionalCastExpr>(E)) {
+ if (C->getSourceRange() == SR)
+ return true;
+ }
+
if (const auto *C = dyn_cast<CXXConstructExpr>(E)) {
- if (C->getSourceRange() == SR || !isa<CXXTemporaryObjectExpr>(C))
+ if (C->getSourceRange() == SR || C->isElidable())
return true;
}
@@ -211,8 +214,6 @@ DynTypedNode createDynTypedNode(const NestedNameSpecifierLoc &Node) {
/// Note that the relationship described here is purely in terms of AST
/// traversal - there are other relationships (for example declaration context)
/// in the AST that are better modeled by special matchers.
-///
-/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
class ParentMapContext::ParentMap::ASTVisitor
: public RecursiveASTVisitor<ASTVisitor> {
public:
@@ -227,51 +228,60 @@ private:
bool shouldVisitImplicitCode() const { return true; }
+ /// Record the parent of the node we're visiting.
+ /// MapNode is the child, the parent is on top of ParentStack.
+ /// Parents is the parent storage (either PointerParents or OtherParents).
+ template <typename MapNodeTy, typename MapTy>
+ void addParent(MapNodeTy MapNode, MapTy *Parents) {
+ if (ParentStack.empty())
+ return;
+
+ // FIXME: Currently we add the same parent multiple times, but only
+ // when no memoization data is available for the type.
+ // For example when we visit all subexpressions of template
+ // instantiations; this is suboptimal, but benign: the only way to
+ // visit those is with hasAncestor / hasParent, and those do not create
+ // new matches.
+ // The plan is to enable DynTypedNode to be storable in a map or hash
+ // map. The main problem there is to implement hash functions /
+ // comparison operators for all types that DynTypedNode supports that
+ // do not have pointer identity.
+ auto &NodeOrVector = (*Parents)[MapNode];
+ if (NodeOrVector.isNull()) {
+ if (const auto *D = ParentStack.back().get<Decl>())
+ NodeOrVector = D;
+ else if (const auto *S = ParentStack.back().get<Stmt>())
+ NodeOrVector = S;
+ else
+ NodeOrVector = new DynTypedNode(ParentStack.back());
+ } else {
+ if (!NodeOrVector.template is<ParentVector *>()) {
+ auto *Vector = new ParentVector(
+ 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
+ delete NodeOrVector.template dyn_cast<DynTypedNode *>();
+ NodeOrVector = Vector;
+ }
+
+ auto *Vector = NodeOrVector.template get<ParentVector *>();
+ // Skip duplicates for types that have memoization data.
+ // We must check that the type has memoization data before calling
+ // std::find() because DynTypedNode::operator== can't compare all
+ // types.
+ bool Found = ParentStack.back().getMemoizationData() &&
+ std::find(Vector->begin(), Vector->end(),
+ ParentStack.back()) != Vector->end();
+ if (!Found)
+ Vector->push_back(ParentStack.back());
+ }
+ }
+
template <typename T, typename MapNodeTy, typename BaseTraverseFn,
typename MapTy>
bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
MapTy *Parents) {
if (!Node)
return true;
- if (ParentStack.size() > 0) {
- // FIXME: Currently we add the same parent multiple times, but only
- // when no memoization data is available for the type.
- // For example when we visit all subexpressions of template
- // instantiations; this is suboptimal, but benign: the only way to
- // visit those is with hasAncestor / hasParent, and those do not create
- // new matches.
- // The plan is to enable DynTypedNode to be storable in a map or hash
- // map. The main problem there is to implement hash functions /
- // comparison operators for all types that DynTypedNode supports that
- // do not have pointer identity.
- auto &NodeOrVector = (*Parents)[MapNode];
- if (NodeOrVector.isNull()) {
- if (const auto *D = ParentStack.back().get<Decl>())
- NodeOrVector = D;
- else if (const auto *S = ParentStack.back().get<Stmt>())
- NodeOrVector = S;
- else
- NodeOrVector = new DynTypedNode(ParentStack.back());
- } else {
- if (!NodeOrVector.template is<ParentVector *>()) {
- auto *Vector = new ParentVector(
- 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
- delete NodeOrVector.template dyn_cast<DynTypedNode *>();
- NodeOrVector = Vector;
- }
-
- auto *Vector = NodeOrVector.template get<ParentVector *>();
- // Skip duplicates for types that have memoization data.
- // We must check that the type has memoization data before calling
- // std::find() because DynTypedNode::operator== can't compare all
- // types.
- bool Found = ParentStack.back().getMemoizationData() &&
- std::find(Vector->begin(), Vector->end(),
- ParentStack.back()) != Vector->end();
- if (!Found)
- Vector->push_back(ParentStack.back());
- }
- }
+ addParent(MapNode, Parents);
ParentStack.push_back(createDynTypedNode(Node));
bool Result = BaseTraverse();
ParentStack.pop_back();
@@ -283,20 +293,12 @@ private:
DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
&Map.PointerParents);
}
-
- bool TraverseStmt(Stmt *StmtNode) {
- return TraverseNode(StmtNode, StmtNode,
- [&] { return VisitorBase::TraverseStmt(StmtNode); },
- &Map.PointerParents);
- }
-
bool TraverseTypeLoc(TypeLoc TypeLocNode) {
return TraverseNode(
TypeLocNode, DynTypedNode::create(TypeLocNode),
[&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
&Map.OtherParents);
}
-
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
return TraverseNode(
NNSLocNode, DynTypedNode::create(NNSLocNode),
@@ -304,6 +306,17 @@ private:
&Map.OtherParents);
}
+ // Using generic TraverseNode for Stmt would prevent data-recursion.
+ bool dataTraverseStmtPre(Stmt *StmtNode) {
+ addParent(StmtNode, &Map.PointerParents);
+ ParentStack.push_back(DynTypedNode::create(*StmtNode));
+ return true;
+ }
+ bool dataTraverseStmtPost(Stmt *StmtNode) {
+ ParentStack.pop_back();
+ return true;
+ }
+
ParentMap &Map;
llvm::SmallVector<DynTypedNode, 16> ParentStack;
};
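A minimal sketch of the lookup this visitor feeds, assuming a valid ASTContext Ctx and a statement S obtained elsewhere:

    for (const DynTypedNode &Parent : Ctx.getParentMapContext().getParents(*S))
      if (const auto *FD = Parent.get<FunctionDecl>())
        llvm::errs() << "enclosing function: " << FD->getName() << "\n";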
diff --git a/clang/lib/AST/PrintfFormatString.cpp b/clang/lib/AST/PrintfFormatString.cpp
index f3ac181214ac..a1abaf2f0943 100644
--- a/clang/lib/AST/PrintfFormatString.cpp
+++ b/clang/lib/AST/PrintfFormatString.cpp
@@ -791,6 +791,9 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
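The .def include follows the usual X-macro pattern: PPCTypes.def holds one PPC_VECTOR_TYPE(Name, Id, Size) entry per type, and every include site supplies the macro body it needs. A generic sketch of the idiom (the entries are illustrative, not the actual contents of PPCTypes.def):

    // shapes.def -- one entry per item, no include guard
    SHAPE("circle", Circle)
    SHAPE("square", Square)
    #undef SHAPE

    // consumer.cpp -- each consumer defines SHAPE before including
    enum class ShapeId {
    #define SHAPE(Name, Id) Id,
    #include "shapes.def"
    };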
diff --git a/clang/lib/AST/RecordLayout.cpp b/clang/lib/AST/RecordLayout.cpp
index e7b500e1902d..8f70a2072926 100644
--- a/clang/lib/AST/RecordLayout.cpp
+++ b/clang/lib/AST/RecordLayout.cpp
@@ -29,45 +29,42 @@ void ASTRecordLayout::Destroy(ASTContext &Ctx) {
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
CharUnits alignment,
+ CharUnits preferredAlignment,
CharUnits unadjustedAlignment,
CharUnits requiredAlignment,
CharUnits datasize,
ArrayRef<uint64_t> fieldoffsets)
: Size(size), DataSize(datasize), Alignment(alignment),
+ PreferredAlignment(preferredAlignment),
UnadjustedAlignment(unadjustedAlignment),
RequiredAlignment(requiredAlignment) {
FieldOffsets.append(Ctx, fieldoffsets.begin(), fieldoffsets.end());
}
// Constructor for C++ records.
-ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
- CharUnits size, CharUnits alignment,
- CharUnits unadjustedAlignment,
- CharUnits requiredAlignment,
- bool hasOwnVFPtr, bool hasExtendableVFPtr,
- CharUnits vbptroffset,
- CharUnits datasize,
- ArrayRef<uint64_t> fieldoffsets,
- CharUnits nonvirtualsize,
- CharUnits nonvirtualalignment,
- CharUnits SizeOfLargestEmptySubobject,
- const CXXRecordDecl *PrimaryBase,
- bool IsPrimaryBaseVirtual,
- const CXXRecordDecl *BaseSharingVBPtr,
- bool EndsWithZeroSizedObject,
- bool LeadsWithZeroSizedBase,
- const BaseOffsetsMapTy& BaseOffsets,
- const VBaseOffsetsMapTy& VBaseOffsets)
- : Size(size), DataSize(datasize), Alignment(alignment),
- UnadjustedAlignment(unadjustedAlignment),
- RequiredAlignment(requiredAlignment), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
-{
+ASTRecordLayout::ASTRecordLayout(
+ const ASTContext &Ctx, CharUnits size, CharUnits alignment,
+ CharUnits preferredAlignment, CharUnits unadjustedAlignment,
+ CharUnits requiredAlignment, bool hasOwnVFPtr, bool hasExtendableVFPtr,
+ CharUnits vbptroffset, CharUnits datasize, ArrayRef<uint64_t> fieldoffsets,
+ CharUnits nonvirtualsize, CharUnits nonvirtualalignment,
+ CharUnits preferrednvalignment, CharUnits SizeOfLargestEmptySubobject,
+ const CXXRecordDecl *PrimaryBase, bool IsPrimaryBaseVirtual,
+ const CXXRecordDecl *BaseSharingVBPtr, bool EndsWithZeroSizedObject,
+ bool LeadsWithZeroSizedBase, const BaseOffsetsMapTy &BaseOffsets,
+ const VBaseOffsetsMapTy &VBaseOffsets)
+ : Size(size), DataSize(datasize), Alignment(alignment),
+ PreferredAlignment(preferredAlignment),
+ UnadjustedAlignment(unadjustedAlignment),
+ RequiredAlignment(requiredAlignment),
+ CXXInfo(new (Ctx) CXXRecordLayoutInfo) {
FieldOffsets.append(Ctx, fieldoffsets.begin(), fieldoffsets.end());
CXXInfo->PrimaryBase.setPointer(PrimaryBase);
CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
CXXInfo->NonVirtualSize = nonvirtualsize;
CXXInfo->NonVirtualAlignment = nonvirtualalignment;
+ CXXInfo->PreferredNVAlignment = preferrednvalignment;
CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
CXXInfo->BaseOffsets = BaseOffsets;
CXXInfo->VBaseOffsets = VBaseOffsets;
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index d56c7e2ab8c0..95d69fa5b11a 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -6,7 +6,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/RecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Attr.h"
@@ -16,6 +15,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/VTableBuilder.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Format.h"
@@ -589,6 +589,9 @@ protected:
/// Alignment - The current alignment of the record layout.
CharUnits Alignment;
+ /// PreferredAlignment - The preferred alignment of the record layout.
+ CharUnits PreferredAlignment;
+
/// The alignment if attribute packed is not used.
CharUnits UnpackedAlignment;
@@ -612,6 +615,8 @@ protected:
unsigned IsMac68kAlign : 1;
+ unsigned IsNaturalAlign : 1;
+
unsigned IsMsStruct : 1;
/// UnfilledBitsInLastUnit - If the last field laid out was a bitfield,
@@ -619,9 +624,10 @@ protected:
/// an adjacent bitfield if necessary. The unit in question is usually
/// a byte, but larger units are used if IsMsStruct.
unsigned char UnfilledBitsInLastUnit;
- /// LastBitfieldTypeSize - If IsMsStruct, represents the size of the type
- /// of the previous field if it was a bitfield.
- unsigned char LastBitfieldTypeSize;
+
+ /// LastBitfieldStorageUnitSize - If IsMsStruct, represents the size of the
+ /// storage unit of the previous field if it was a bitfield.
+ unsigned char LastBitfieldStorageUnitSize;
/// MaxFieldAlignment - The maximum allowed field alignment. This is set by
/// #pragma pack.
@@ -632,6 +638,7 @@ protected:
CharUnits NonVirtualSize;
CharUnits NonVirtualAlignment;
+ CharUnits PreferredNVAlignment;
/// If we've laid out a field but not included its tail padding in Size yet,
/// this is the size up to the end of that field.
@@ -652,6 +659,12 @@ protected:
/// the flag of field offset changing due to packed attribute.
bool HasPackedField;
+ /// HandledFirstNonOverlappingEmptyField - An auxiliary field used for AIX.
+  /// When overlapping empty fields exist in the aggregate, this flag records
+  /// whether the first subsequent non-empty or empty-but-non-overlapping
+  /// field has been handled, if any.
+ bool HandledFirstNonOverlappingEmptyField;
+
typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
/// Bases - base classes and their offsets in the record.
@@ -678,17 +691,21 @@ protected:
ItaniumRecordLayoutBuilder(const ASTContext &Context,
EmptySubobjectMap *EmptySubobjects)
: Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
- Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
- UnadjustedAlignment(CharUnits::One()),
- UseExternalLayout(false), InferAlignment(false), Packed(false),
- IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
- UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
- MaxFieldAlignment(CharUnits::Zero()), DataSize(0),
- NonVirtualSize(CharUnits::Zero()),
+ Alignment(CharUnits::One()), PreferredAlignment(CharUnits::One()),
+ UnpackedAlignment(CharUnits::One()),
+ UnadjustedAlignment(CharUnits::One()), UseExternalLayout(false),
+ InferAlignment(false), Packed(false), IsUnion(false),
+ IsMac68kAlign(false),
+ IsNaturalAlign(!Context.getTargetInfo().getTriple().isOSAIX()),
+ IsMsStruct(false), UnfilledBitsInLastUnit(0),
+ LastBitfieldStorageUnitSize(0), MaxFieldAlignment(CharUnits::Zero()),
+ DataSize(0), NonVirtualSize(CharUnits::Zero()),
NonVirtualAlignment(CharUnits::One()),
+ PreferredNVAlignment(CharUnits::One()),
PaddedFieldSize(CharUnits::Zero()), PrimaryBase(nullptr),
- PrimaryBaseIsVirtual(false), HasOwnVFPtr(false),
- HasPackedField(false), FirstNearlyEmptyVBase(nullptr) {}
+ PrimaryBaseIsVirtual(false), HasOwnVFPtr(false), HasPackedField(false),
+ HandledFirstNonOverlappingEmptyField(false),
+ FirstNearlyEmptyVBase(nullptr) {}
void Layout(const RecordDecl *D);
void Layout(const CXXRecordDecl *D);
@@ -696,7 +713,7 @@ protected:
void LayoutFields(const RecordDecl *D);
void LayoutField(const FieldDecl *D, bool InsertExtraPadding);
- void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
+ void LayoutWideBitField(uint64_t FieldSize, uint64_t StorageUnitSize,
bool FieldPacked, const FieldDecl *D);
void LayoutBitField(const FieldDecl *D);
@@ -763,9 +780,13 @@ protected:
/// alignment.
void FinishLayout(const NamedDecl *D);
- void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment);
+ void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment,
+ CharUnits PreferredAlignment);
+ void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment) {
+ UpdateAlignment(NewAlignment, UnpackedNewAlignment, NewAlignment);
+ }
void UpdateAlignment(CharUnits NewAlignment) {
- UpdateAlignment(NewAlignment, NewAlignment);
+ UpdateAlignment(NewAlignment, NewAlignment, NewAlignment);
}
/// Retrieve the externally-supplied field offset for the given
@@ -998,7 +1019,7 @@ void ItaniumRecordLayoutBuilder::EnsureVTablePointerAlignment(
setSize(getSize().alignTo(BaseAlign));
// Update the alignment.
- UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign, BaseAlign);
}
void ItaniumRecordLayoutBuilder::LayoutNonVirtualBases(
@@ -1044,6 +1065,10 @@ void ItaniumRecordLayoutBuilder::LayoutNonVirtualBases(
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
EnsureVTablePointerAlignment(PtrAlign);
HasOwnVFPtr = true;
+
+ assert(!IsUnion && "Unions cannot be dynamic classes.");
+ HandledFirstNonOverlappingEmptyField = true;
+
setSize(getSize() + PtrWidth);
setDataSize(getSize());
}
@@ -1179,9 +1204,9 @@ void ItaniumRecordLayoutBuilder::LayoutVirtualBase(
CharUnits
ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
-
+ assert(!IsUnion && "Unions cannot have base classes.");
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
CharUnits Offset;
// Query the external layout to see if it provides an offset.
@@ -1193,45 +1218,77 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
}
- // Clang <= 6 incorrectly applied the 'packed' attribute to base classes.
- // Per GCC's documentation, it only applies to non-static data members.
+ auto getBaseOrPreferredBaseAlignFromUnpacked = [&](CharUnits UnpackedAlign) {
+ // Clang <= 6 incorrectly applied the 'packed' attribute to base classes.
+ // Per GCC's documentation, it only applies to non-static data members.
+ return (Packed && ((Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver6) ||
+ Context.getTargetInfo().getTriple().isPS4() ||
+ Context.getTargetInfo().getTriple().isOSAIX()))
+ ? CharUnits::One()
+ : UnpackedAlign;
+ };
+
CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlignment();
+ CharUnits UnpackedPreferredBaseAlign = Layout.getPreferredNVAlignment();
CharUnits BaseAlign =
- (Packed && ((Context.getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver6) ||
- Context.getTargetInfo().getTriple().isPS4()))
- ? CharUnits::One()
- : UnpackedBaseAlign;
+ getBaseOrPreferredBaseAlignFromUnpacked(UnpackedBaseAlign);
+ CharUnits PreferredBaseAlign =
+ getBaseOrPreferredBaseAlignFromUnpacked(UnpackedPreferredBaseAlign);
+
+ const bool DefaultsToAIXPowerAlignment =
+ Context.getTargetInfo().defaultsToAIXPowerAlignment();
+ if (DefaultsToAIXPowerAlignment) {
+ // AIX `power` alignment does not apply the preferred alignment for
+ // non-union classes if the source of the alignment (the current base in
+ // this context) follows introduction of the first subobject with
+ // exclusively allocated space or zero-extent array.
+ if (!Base->Class->isEmpty() && !HandledFirstNonOverlappingEmptyField) {
+ // By handling a base class that is not empty, we're handling the
+ // "first (inherited) member".
+ HandledFirstNonOverlappingEmptyField = true;
+ } else if (!IsNaturalAlign) {
+ UnpackedPreferredBaseAlign = UnpackedBaseAlign;
+ PreferredBaseAlign = BaseAlign;
+ }
+ }
+ CharUnits UnpackedAlignTo = !DefaultsToAIXPowerAlignment
+ ? UnpackedBaseAlign
+ : UnpackedPreferredBaseAlign;
// If we have an empty base class, try to place it at offset 0.
if (Base->Class->isEmpty() &&
(!HasExternalLayout || Offset == CharUnits::Zero()) &&
EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
setSize(std::max(getSize(), Layout.getSize()));
- UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+ UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign);
return CharUnits::Zero();
}
- // The maximum field alignment overrides base align.
+ // The maximum field alignment overrides the base align/(AIX-only) preferred
+ // base align.
if (!MaxFieldAlignment.isZero()) {
BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
- UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ PreferredBaseAlign = std::min(PreferredBaseAlign, MaxFieldAlignment);
+ UnpackedAlignTo = std::min(UnpackedAlignTo, MaxFieldAlignment);
}
+ CharUnits AlignTo =
+ !DefaultsToAIXPowerAlignment ? BaseAlign : PreferredBaseAlign;
if (!HasExternalLayout) {
// Round up the current record size to the base's alignment boundary.
- Offset = getDataSize().alignTo(BaseAlign);
+ Offset = getDataSize().alignTo(AlignTo);
// Try to place the base.
while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
- Offset += BaseAlign;
+ Offset += AlignTo;
} else {
bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset);
(void)Allowed;
assert(Allowed && "Base subobject externally placed at overlapping offset");
- if (InferAlignment && Offset < getDataSize().alignTo(BaseAlign)) {
+ if (InferAlignment && Offset < getDataSize().alignTo(AlignTo)) {
// The externally-supplied base offset is before the base offset we
// computed. Assume that the structure is packed.
Alignment = CharUnits::One();
@@ -1248,7 +1305,7 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
setSize(std::max(getSize(), Offset + Layout.getSize()));
// Remember max struct/class alignment.
- UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+ UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign);
return Offset;
}
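A hedged illustration of the AIX power alignment rule this implements, assuming an AIX target where double has 4-byte ABI alignment and 8-byte preferred (natural) alignment:

    struct First { double D; int I; };  // D is the "first member":
                                        // preferredalign=8, sizeof == 16
    struct Later { int I; double D; };  // D is not first: it lands at
                                        // offset 4, preferredalign=4,
                                        // sizeof == 12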
@@ -1271,10 +1328,17 @@ void ItaniumRecordLayoutBuilder::InitializeLayout(const Decl *D) {
// allude to additional (more complicated) semantics, especially with regard
// to bit-fields, but gcc appears not to follow that.
if (D->hasAttr<AlignMac68kAttr>()) {
+ assert(
+ !D->hasAttr<AlignNaturalAttr>() &&
+ "Having both mac68k and natural alignment on a decl is not allowed.");
IsMac68kAlign = true;
MaxFieldAlignment = CharUnits::fromQuantity(2);
Alignment = CharUnits::fromQuantity(2);
+ PreferredAlignment = CharUnits::fromQuantity(2);
} else {
+ if (D->hasAttr<AlignNaturalAttr>())
+ IsNaturalAlign = true;
+
if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>())
MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
@@ -1282,6 +1346,9 @@ void ItaniumRecordLayoutBuilder::InitializeLayout(const Decl *D) {
UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign));
}
+ HandledFirstNonOverlappingEmptyField =
+ !Context.getTargetInfo().defaultsToAIXPowerAlignment() || IsNaturalAlign;
+
// If there is an external AST source, ask it for the various offsets.
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
if (ExternalASTSource *Source = Context.getExternalSource()) {
@@ -1293,6 +1360,7 @@ void ItaniumRecordLayoutBuilder::InitializeLayout(const Decl *D) {
if (UseExternalLayout) {
if (External.Align > 0) {
Alignment = Context.toCharUnitsFromBits(External.Align);
+ PreferredAlignment = Context.toCharUnitsFromBits(External.Align);
} else {
// The external source didn't have alignment information; infer it.
InferAlignment = true;
@@ -1321,6 +1389,7 @@ void ItaniumRecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
NonVirtualSize = Context.toCharUnitsFromBits(
llvm::alignTo(getSizeInBits(), Context.getTargetInfo().getCharAlign()));
NonVirtualAlignment = Alignment;
+ PreferredNVAlignment = PreferredAlignment;
// Lay out the virtual bases and add the primary virtual base offsets.
LayoutVirtualBases(RD, RD);
@@ -1394,7 +1463,7 @@ roundUpSizeToCharAlignment(uint64_t Size,
}
void ItaniumRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
- uint64_t TypeSize,
+ uint64_t StorageUnitSize,
bool FieldPacked,
const FieldDecl *D) {
assert(Context.getLangOpts().CPlusPlus &&
@@ -1424,7 +1493,7 @@ void ItaniumRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
// We're not going to use any of the unfilled bits in the last byte.
UnfilledBitsInLastUnit = 0;
- LastBitfieldTypeSize = 0;
+ LastBitfieldStorageUnitSize = 0;
uint64_t FieldOffset;
uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
@@ -1463,7 +1532,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
uint64_t FieldSize = D->getBitWidthValue(Context);
TypeInfo FieldInfo = Context.getTypeInfo(D->getType());
- uint64_t TypeSize = FieldInfo.Width;
+ uint64_t StorageUnitSize = FieldInfo.Width;
unsigned FieldAlign = FieldInfo.Align;
// UnfilledBitsInLastUnit is the difference between the end of the
@@ -1472,7 +1541,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// first bit offset available for non-bitfields). The current data
// size in bits is always a multiple of the char size; additionally,
// for ms_struct records it's also a multiple of the
- // LastBitfieldTypeSize (if set).
+ // LastBitfieldStorageUnitSize (if set).
// The struct-layout algorithm is dictated by the platform ABI,
// which in principle could use almost any rules it likes. In
@@ -1526,26 +1595,26 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// First, some simple bookkeeping to perform for ms_struct structs.
if (IsMsStruct) {
// The field alignment for integer types is always the size.
- FieldAlign = TypeSize;
+ FieldAlign = StorageUnitSize;
// If the previous field was not a bitfield, or was a bitfield
// with a different storage unit size, or if this field doesn't fit into
// the current storage unit, we're done with that storage unit.
- if (LastBitfieldTypeSize != TypeSize ||
+ if (LastBitfieldStorageUnitSize != StorageUnitSize ||
UnfilledBitsInLastUnit < FieldSize) {
// Also, ignore zero-length bitfields after non-bitfields.
- if (!LastBitfieldTypeSize && !FieldSize)
+ if (!LastBitfieldStorageUnitSize && !FieldSize)
FieldAlign = 1;
UnfilledBitsInLastUnit = 0;
- LastBitfieldTypeSize = 0;
+ LastBitfieldStorageUnitSize = 0;
}
}
// If the field is wider than its declared type, it follows
// different rules in all cases.
- if (FieldSize > TypeSize) {
- LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
+ if (FieldSize > StorageUnitSize) {
+ LayoutWideBitField(FieldSize, StorageUnitSize, FieldPacked, D);
return;
}
@@ -1629,7 +1698,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// Compute the real offset.
if (FieldSize == 0 ||
(AllowPadding &&
- (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)) {
+ (FieldOffset & (FieldAlign - 1)) + FieldSize > StorageUnitSize)) {
FieldOffset = llvm::alignTo(FieldOffset, FieldAlign);
} else if (ExplicitFieldAlign &&
(MaxFieldAlignmentInBits == 0 ||
@@ -1643,7 +1712,8 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// Repeat the computation for diagnostic purposes.
if (FieldSize == 0 ||
(AllowPadding &&
- (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
+ (UnpackedFieldOffset & (UnpackedFieldAlign - 1)) + FieldSize >
+ StorageUnitSize))
UnpackedFieldOffset =
llvm::alignTo(UnpackedFieldOffset, UnpackedFieldAlign);
else if (ExplicitFieldAlign &&
@@ -1684,11 +1754,11 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// is a zero-width bitfield, in which case just use a size of 1.
uint64_t RoundedFieldSize;
if (IsMsStruct) {
- RoundedFieldSize =
- (FieldSize ? TypeSize : Context.getTargetInfo().getCharWidth());
+ RoundedFieldSize = (FieldSize ? StorageUnitSize
+ : Context.getTargetInfo().getCharWidth());
- // Otherwise, allocate just the number of bytes required to store
- // the bitfield.
+ // Otherwise, allocate just the number of bytes required to store
+ // the bitfield.
} else {
RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize, Context);
}
@@ -1700,15 +1770,15 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// We should have cleared UnfilledBitsInLastUnit in every case
// where we changed storage units.
if (!UnfilledBitsInLastUnit) {
- setDataSize(FieldOffset + TypeSize);
- UnfilledBitsInLastUnit = TypeSize;
+ setDataSize(FieldOffset + StorageUnitSize);
+ UnfilledBitsInLastUnit = StorageUnitSize;
}
UnfilledBitsInLastUnit -= FieldSize;
- LastBitfieldTypeSize = TypeSize;
+ LastBitfieldStorageUnitSize = StorageUnitSize;
-    // Otherwise, bump the data size up to include the bitfield,
-    // including padding up to char alignment, and then remember how
-    // many bits we didn't use.
+      // Otherwise, bump the data size up to include the bitfield,
+      // including padding up to char alignment, and then remember how
+      // many bits we didn't use.
} else {
uint64_t NewSizeInBits = FieldOffset + FieldSize;
uint64_t CharAlignment = Context.getTargetInfo().getCharAlign();
@@ -1718,7 +1788,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// The only time we can get here for an ms_struct is if this is a
// zero-width bitfield, which doesn't count as anything for the
// purposes of unfilled bits.
- LastBitfieldTypeSize = 0;
+ LastBitfieldStorageUnitSize = 0;
}
// Update the size.
@@ -1733,25 +1803,46 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
bool InsertExtraPadding) {
+ auto *FieldClass = D->getType()->getAsCXXRecordDecl();
+ bool PotentiallyOverlapping = D->hasAttr<NoUniqueAddressAttr>() && FieldClass;
+ bool IsOverlappingEmptyField =
+ PotentiallyOverlapping && FieldClass->isEmpty();
+
+ CharUnits FieldOffset =
+ (IsUnion || IsOverlappingEmptyField) ? CharUnits::Zero() : getDataSize();
+
+ const bool DefaultsToAIXPowerAlignment =
+ Context.getTargetInfo().defaultsToAIXPowerAlignment();
+ bool FoundFirstNonOverlappingEmptyFieldForAIX = false;
+ if (DefaultsToAIXPowerAlignment && !HandledFirstNonOverlappingEmptyField) {
+ assert(FieldOffset == CharUnits::Zero() &&
+ "The first non-overlapping empty field should have been handled.");
+
+ if (!IsOverlappingEmptyField) {
+ FoundFirstNonOverlappingEmptyFieldForAIX = true;
+
+ // We're going to handle the "first member" based on
+ // `FoundFirstNonOverlappingEmptyFieldForAIX` during the current
+ // invocation of this function; record it as handled for future
+ // invocations (except for unions, because the current field does not
+ // represent all "firsts").
+ HandledFirstNonOverlappingEmptyField = !IsUnion;
+ }
+ }
+
if (D->isBitField()) {
LayoutBitField(D);
return;
}
uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
-
// Reset the unfilled bits.
UnfilledBitsInLastUnit = 0;
- LastBitfieldTypeSize = 0;
+ LastBitfieldStorageUnitSize = 0;
- auto *FieldClass = D->getType()->getAsCXXRecordDecl();
- bool PotentiallyOverlapping = D->hasAttr<NoUniqueAddressAttr>() && FieldClass;
- bool IsOverlappingEmptyField = PotentiallyOverlapping && FieldClass->isEmpty();
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
- CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField)
- ? CharUnits::Zero()
- : getDataSize();
+ bool AlignIsRequired = false;
CharUnits FieldSize;
CharUnits FieldAlign;
// The amount of this class's dsize occupied by the field.
@@ -1759,25 +1850,26 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// into the field's tail padding.
CharUnits EffectiveFieldSize;
+ auto setDeclInfo = [&](bool IsIncompleteArrayType) {
+ auto TI = Context.getTypeInfoInChars(D->getType());
+ FieldAlign = TI.Align;
+ // Flexible array members don't have any size, but they have to be
+ // aligned appropriately for their element type.
+ EffectiveFieldSize = FieldSize =
+ IsIncompleteArrayType ? CharUnits::Zero() : TI.Width;
+ AlignIsRequired = TI.AlignIsRequired;
+ };
+
if (D->getType()->isIncompleteArrayType()) {
- // This is a flexible array member; we can't directly
- // query getTypeInfo about these, so we figure it out here.
- // Flexible array members don't have any size, but they
- // have to be aligned appropriately for their element type.
- EffectiveFieldSize = FieldSize = CharUnits::Zero();
- const ArrayType* ATy = Context.getAsArrayType(D->getType());
- FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
+ setDeclInfo(true /* IsIncompleteArrayType */);
} else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
unsigned AS = Context.getTargetAddressSpace(RT->getPointeeType());
- EffectiveFieldSize = FieldSize =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
- FieldAlign =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
+ EffectiveFieldSize = FieldSize = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerWidth(AS));
+ FieldAlign = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerAlign(AS));
} else {
- std::pair<CharUnits, CharUnits> FieldInfo =
- Context.getTypeInfoInChars(D->getType());
- EffectiveFieldSize = FieldSize = FieldInfo.first;
- FieldAlign = FieldInfo.second;
+ setDeclInfo(false /* IsIncompleteArrayType */);
// A potentially-overlapping field occupies its dsize or nvsize, whichever
// is larger.
@@ -1829,31 +1921,72 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
}
}
+ // The AIX `power` alignment rules apply the natural alignment of the
+ // "first member" if it is of a floating-point data type (or is an aggregate
+ // whose recursively "first" member or element is such a type). The alignment
+  // associated with these types for subsequent members uses an alignment value
+ // where the floating-point data type is considered to have 4-byte alignment.
+ //
+ // For the purposes of the foregoing: vtable pointers, non-empty base classes,
+ // and zero-width bit-fields count as prior members; members of empty class
+ // types marked `no_unique_address` are not considered to be prior members.
+ CharUnits PreferredAlign = FieldAlign;
+ if (DefaultsToAIXPowerAlignment && !AlignIsRequired &&
+ (FoundFirstNonOverlappingEmptyFieldForAIX || IsNaturalAlign)) {
+ auto performBuiltinTypeAlignmentUpgrade = [&](const BuiltinType *BTy) {
+ if (BTy->getKind() == BuiltinType::Double ||
+ BTy->getKind() == BuiltinType::LongDouble) {
+ assert(PreferredAlign == CharUnits::fromQuantity(4) &&
+ "No need to upgrade the alignment value.");
+ PreferredAlign = CharUnits::fromQuantity(8);
+ }
+ };
+
+ const Type *Ty = D->getType()->getBaseElementTypeUnsafe();
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+      performBuiltinTypeAlignmentUpgrade(
+          CTy->getElementType()->castAs<BuiltinType>());
+ } else if (const BuiltinType *BTy = Ty->getAs<BuiltinType>()) {
+ performBuiltinTypeAlignmentUpgrade(BTy);
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ assert(RD && "Expected non-null RecordDecl.");
+ const ASTRecordLayout &FieldRecord = Context.getASTRecordLayout(RD);
+ PreferredAlign = FieldRecord.getPreferredAlignment();
+ }
+ }
+
// The align if the field is not packed. This is to check if the attribute
// was unnecessary (-Wpacked).
- CharUnits UnpackedFieldAlign = FieldAlign;
+ CharUnits UnpackedFieldAlign =
+ !DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign;
CharUnits UnpackedFieldOffset = FieldOffset;
- if (FieldPacked)
+ if (FieldPacked) {
FieldAlign = CharUnits::One();
+ PreferredAlign = CharUnits::One();
+ }
CharUnits MaxAlignmentInChars =
- Context.toCharUnitsFromBits(D->getMaxAlignment());
+ Context.toCharUnitsFromBits(D->getMaxAlignment());
FieldAlign = std::max(FieldAlign, MaxAlignmentInChars);
+ PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars);
UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars);
// The maximum field alignment overrides the aligned attribute.
if (!MaxFieldAlignment.isZero()) {
FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
+ PreferredAlign = std::min(PreferredAlign, MaxFieldAlignment);
UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment);
}
+ CharUnits AlignTo =
+ !DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign;
// Round up the current record size to the field's alignment boundary.
- FieldOffset = FieldOffset.alignTo(FieldAlign);
+ FieldOffset = FieldOffset.alignTo(AlignTo);
UnpackedFieldOffset = UnpackedFieldOffset.alignTo(UnpackedFieldAlign);
if (UseExternalLayout) {
FieldOffset = Context.toCharUnitsFromBits(
- updateExternalFieldOffset(D, Context.toBits(FieldOffset)));
+ updateExternalFieldOffset(D, Context.toBits(FieldOffset)));
if (!IsUnion && EmptySubobjects) {
// Record the fact that we're placing a field at this offset.
@@ -1869,9 +2002,9 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// We try offset 0 (for an empty field) and then dsize(C) onwards.
if (FieldOffset == CharUnits::Zero() &&
getDataSize() != CharUnits::Zero())
- FieldOffset = getDataSize().alignTo(FieldAlign);
+ FieldOffset = getDataSize().alignTo(AlignTo);
else
- FieldOffset += FieldAlign;
+ FieldOffset += AlignTo;
}
}
}
@@ -1908,9 +2041,9 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
(uint64_t)Context.toBits(FieldOffset + FieldSize)));
}
- // Remember max struct/class alignment.
+ // Remember max struct/class ABI-specified alignment.
UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign);
- UpdateAlignment(FieldAlign, UnpackedFieldAlign);
+ UpdateAlignment(FieldAlign, UnpackedFieldAlign, PreferredAlign);
}
void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
@@ -1936,8 +2069,12 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit;
uint64_t UnpackedSizeInBits =
llvm::alignTo(getSizeInBits(), Context.toBits(UnpackedAlignment));
- uint64_t RoundedSize =
- llvm::alignTo(getSizeInBits(), Context.toBits(Alignment));
+
+ uint64_t RoundedSize = llvm::alignTo(
+ getSizeInBits(),
+ Context.toBits(!Context.getTargetInfo().defaultsToAIXPowerAlignment()
+ ? Alignment
+ : PreferredAlignment));
if (UseExternalLayout) {
// If we're inferring alignment, and the external size is smaller than
@@ -1945,6 +2082,7 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
// alignment to 1.
if (InferAlignment && External.Size < RoundedSize) {
Alignment = CharUnits::One();
+ PreferredAlignment = CharUnits::One();
InferAlignment = false;
}
setSize(External.Size);
@@ -1981,7 +2119,8 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
}
void ItaniumRecordLayoutBuilder::UpdateAlignment(
- CharUnits NewAlignment, CharUnits UnpackedNewAlignment) {
+ CharUnits NewAlignment, CharUnits UnpackedNewAlignment,
+ CharUnits PreferredNewAlignment) {
// The alignment is not modified when using 'mac68k' alignment or when
// we have an externally-supplied layout that also provides overall alignment.
if (IsMac68kAlign || (UseExternalLayout && !InferAlignment))
@@ -1998,6 +2137,12 @@ void ItaniumRecordLayoutBuilder::UpdateAlignment(
"Alignment not a power of 2");
UnpackedAlignment = UnpackedNewAlignment;
}
+
+ if (PreferredNewAlignment > PreferredAlignment) {
+ assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) &&
+ "Alignment not a power of 2");
+ PreferredAlignment = PreferredNewAlignment;
+ }
}
uint64_t
@@ -2009,6 +2154,7 @@ ItaniumRecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
// The externally-supplied field offset is before the field offset we
// computed. Assume that the structure is packed.
Alignment = CharUnits::One();
+ PreferredAlignment = CharUnits::One();
InferAlignment = false;
}
@@ -2437,9 +2583,9 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
const FieldDecl *FD) {
  // Get the natural alignment of the field type, ignoring any
  // alignment attributes.
- ElementInfo Info;
- std::tie(Info.Size, Info.Alignment) =
+ auto TInfo =
Context.getTypeInfoInChars(FD->getType()->getUnqualifiedDesugaredType());
+ ElementInfo Info{TInfo.Width, TInfo.Align};
// Respect align attributes on the field.
CharUnits FieldRequiredAlignment =
Context.toCharUnitsFromBits(FD->getMaxAlignment());
@@ -3063,10 +3209,10 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
Builder.cxxLayout(RD);
NewEntry = new (*this) ASTRecordLayout(
*this, Builder.Size, Builder.Alignment, Builder.Alignment,
- Builder.RequiredAlignment,
- Builder.HasOwnVFPtr, Builder.HasOwnVFPtr || Builder.PrimaryBase,
- Builder.VBPtrOffset, Builder.DataSize, Builder.FieldOffsets,
- Builder.NonVirtualSize, Builder.Alignment, CharUnits::Zero(),
+ Builder.Alignment, Builder.RequiredAlignment, Builder.HasOwnVFPtr,
+ Builder.HasOwnVFPtr || Builder.PrimaryBase, Builder.VBPtrOffset,
+ Builder.DataSize, Builder.FieldOffsets, Builder.NonVirtualSize,
+ Builder.Alignment, Builder.Alignment, CharUnits::Zero(),
Builder.PrimaryBase, false, Builder.SharedVBPtrBase,
Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase,
Builder.Bases, Builder.VBases);
@@ -3074,8 +3220,8 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
Builder.layout(D);
NewEntry = new (*this) ASTRecordLayout(
*this, Builder.Size, Builder.Alignment, Builder.Alignment,
- Builder.RequiredAlignment,
- Builder.Size, Builder.FieldOffsets);
+ Builder.Alignment, Builder.RequiredAlignment, Builder.Size,
+ Builder.FieldOffsets);
}
} else {
if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
@@ -3095,11 +3241,13 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
CharUnits NonVirtualSize =
skipTailPadding ? DataSize : Builder.NonVirtualSize;
NewEntry = new (*this) ASTRecordLayout(
- *this, Builder.getSize(), Builder.Alignment, Builder.UnadjustedAlignment,
+ *this, Builder.getSize(), Builder.Alignment,
+ Builder.PreferredAlignment, Builder.UnadjustedAlignment,
/*RequiredAlignment : used by MS-ABI)*/
Builder.Alignment, Builder.HasOwnVFPtr, RD->isDynamicClass(),
CharUnits::fromQuantity(-1), DataSize, Builder.FieldOffsets,
NonVirtualSize, Builder.NonVirtualAlignment,
+ Builder.PreferredNVAlignment,
EmptySubobjects.SizeOfLargestEmptySubobject, Builder.PrimaryBase,
Builder.PrimaryBaseIsVirtual, nullptr, false, false, Builder.Bases,
Builder.VBases);
@@ -3108,7 +3256,8 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
Builder.Layout(D);
NewEntry = new (*this) ASTRecordLayout(
- *this, Builder.getSize(), Builder.Alignment, Builder.UnadjustedAlignment,
+ *this, Builder.getSize(), Builder.Alignment,
+ Builder.PreferredAlignment, Builder.UnadjustedAlignment,
/*RequiredAlignment : used by MS-ABI)*/
Builder.Alignment, Builder.getSize(), Builder.FieldOffsets);
}
@@ -3260,14 +3409,11 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
ItaniumRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
Builder.Layout(D);
- const ASTRecordLayout *NewEntry =
- new (*this) ASTRecordLayout(*this, Builder.getSize(),
- Builder.Alignment,
- Builder.UnadjustedAlignment,
- /*RequiredAlignment : used by MS-ABI)*/
- Builder.Alignment,
- Builder.getDataSize(),
- Builder.FieldOffsets);
+ const ASTRecordLayout *NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.getSize(), Builder.Alignment, Builder.PreferredAlignment,
+ Builder.UnadjustedAlignment,
+ /*RequiredAlignment : used by MS-ABI)*/
+ Builder.Alignment, Builder.getDataSize(), Builder.FieldOffsets);
ObjCLayouts[Key] = NewEntry;
@@ -3430,22 +3576,26 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
if (CXXRD && !isMsLayout(C))
OS << ", dsize=" << Layout.getDataSize().getQuantity();
OS << ", align=" << Layout.getAlignment().getQuantity();
+ if (C.getTargetInfo().defaultsToAIXPowerAlignment())
+ OS << ", preferredalign=" << Layout.getPreferredAlignment().getQuantity();
if (CXXRD) {
OS << ",\n";
PrintIndentNoOffset(OS, IndentLevel - 1);
OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
OS << ", nvalign=" << Layout.getNonVirtualAlignment().getQuantity();
+ if (C.getTargetInfo().defaultsToAIXPowerAlignment())
+ OS << ", preferrednvalign="
+ << Layout.getPreferredNVAlignment().getQuantity();
}
OS << "]\n";
}
-void ASTContext::DumpRecordLayout(const RecordDecl *RD,
- raw_ostream &OS,
+void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
bool Simple) const {
if (!Simple) {
::DumpRecordLayout(OS, RD, *this, CharUnits(), 0, nullptr,
- /*PrintSizeInfo*/true,
+ /*PrintSizeInfo*/ true,
/*IncludeVirtualBases=*/true);
return;
}
@@ -3465,9 +3615,13 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD,
if (!isMsLayout(*this))
OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
OS << " Alignment:" << toBits(Info.getAlignment()) << "\n";
+ if (Target->defaultsToAIXPowerAlignment())
+ OS << " PreferredAlignment:" << toBits(Info.getPreferredAlignment())
+ << "\n";
OS << " FieldOffsets: [";
for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {
- if (i) OS << ", ";
+ if (i)
+ OS << ", ";
OS << Info.getFieldOffset(i);
}
OS << "]>\n";
diff --git a/clang/lib/AST/Stmt.cpp b/clang/lib/AST/Stmt.cpp
index 25e685be3e9b..83821ea6f5fc 100644
--- a/clang/lib/AST/Stmt.cpp
+++ b/clang/lib/AST/Stmt.cpp
@@ -13,11 +13,12 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtCXX.h"
@@ -41,8 +42,8 @@
#include <cassert>
#include <cstring>
#include <string>
-#include <utility>
#include <type_traits>
+#include <utility>
using namespace clang;
@@ -129,6 +130,66 @@ void Stmt::EnableStatistics() {
StatisticsEnabled = true;
}
+static std::pair<Stmt::Likelihood, const Attr *>
+getLikelihood(ArrayRef<const Attr *> Attrs) {
+ for (const auto *A : Attrs) {
+ if (isa<LikelyAttr>(A))
+ return std::make_pair(Stmt::LH_Likely, A);
+
+ if (isa<UnlikelyAttr>(A))
+ return std::make_pair(Stmt::LH_Unlikely, A);
+ }
+
+ return std::make_pair(Stmt::LH_None, nullptr);
+}
+
+static std::pair<Stmt::Likelihood, const Attr *> getLikelihood(const Stmt *S) {
+ if (const auto *AS = dyn_cast_or_null<AttributedStmt>(S))
+ return getLikelihood(AS->getAttrs());
+
+ return std::make_pair(Stmt::LH_None, nullptr);
+}
+
+Stmt::Likelihood Stmt::getLikelihood(ArrayRef<const Attr *> Attrs) {
+ return ::getLikelihood(Attrs).first;
+}
+
+Stmt::Likelihood Stmt::getLikelihood(const Stmt *S) {
+ return ::getLikelihood(S).first;
+}
+
+const Attr *Stmt::getLikelihoodAttr(const Stmt *S) {
+ return ::getLikelihood(S).second;
+}
+
+Stmt::Likelihood Stmt::getLikelihood(const Stmt *Then, const Stmt *Else) {
+ Likelihood LHT = ::getLikelihood(Then).first;
+ Likelihood LHE = ::getLikelihood(Else).first;
+ if (LHE == LH_None)
+ return LHT;
+
+ // If the same attribute is used on both branches there's a conflict.
+ if (LHT == LHE)
+ return LH_None;
+
+ if (LHT != LH_None)
+ return LHT;
+
+ // Invert the value of Else to get the value for Then.
+ return LHE == LH_Likely ? LH_Unlikely : LH_Likely;
+}
+
+std::tuple<bool, const Attr *, const Attr *>
+Stmt::determineLikelihoodConflict(const Stmt *Then, const Stmt *Else) {
+ std::pair<Likelihood, const Attr *> LHT = ::getLikelihood(Then);
+ std::pair<Likelihood, const Attr *> LHE = ::getLikelihood(Else);
+ // If the same attribute is used on both branches there's a conflict.
+ if (LHT.first != LH_None && LHT.first == LHE.first)
+ return std::make_tuple(true, LHT.second, LHE.second);
+
+ return std::make_tuple(false, nullptr, nullptr);
+}
+
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *Stmt::IgnoreContainers(bool IgnoreCaptured) {
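A minimal sketch of the rules these helpers encode, in terms of the C++20 attributes that the Likely/Unlikely AST attributes correspond to:

    int f(int x) {
      if (x > 0) [[likely]]  // Then: LH_Likely, Else: LH_None
        return 1;            // => Stmt::getLikelihood(Then, Else) == LH_Likely
      else
        return 0;
    }
    // [[unlikely]] on only the else branch is inverted to LH_Likely for the
    // then branch; the same attribute on both branches is flagged by
    // determineLikelihoodConflict() and degrades to LH_None.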
@@ -482,7 +543,6 @@ void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C,
this->NumInputs = NumInputs;
this->NumClobbers = NumClobbers;
this->NumLabels = NumLabels;
- assert(!(NumOutputs && NumLabels) && "asm goto cannot have outputs");
unsigned NumExprs = NumOutputs + NumInputs + NumLabels;
@@ -731,7 +791,27 @@ std::string GCCAsmStmt::generateAsmString(const ASTContext &C) const {
/// Assemble final IR asm string (MS-style).
std::string MSAsmStmt::generateAsmString(const ASTContext &C) const {
// FIXME: This needs to be translated into the IR string representation.
- return std::string(AsmStr);
+ SmallVector<StringRef, 8> Pieces;
+ AsmStr.split(Pieces, "\n\t");
+ std::string MSAsmString;
+ for (size_t I = 0, E = Pieces.size(); I < E; ++I) {
+ StringRef Instruction = Pieces[I];
+ // For vex/vex2/vex3/evex masm style prefix, convert it to att style
+ // since we don't support masm style prefix in backend.
+ if (Instruction.startswith("vex "))
+ MSAsmString += '{' + Instruction.substr(0, 3).str() + '}' +
+ Instruction.substr(3).str();
+ else if (Instruction.startswith("vex2 ") ||
+ Instruction.startswith("vex3 ") || Instruction.startswith("evex "))
+ MSAsmString += '{' + Instruction.substr(0, 4).str() + '}' +
+ Instruction.substr(4).str();
+ else
+ MSAsmString += Instruction.str();
+    // If this is not the last instruction, add back the '\n\t'.
+ if (I < E - 1)
+ MSAsmString += "\n\t";
+ }
+ return MSAsmString;
}
Expr *MSAsmStmt::getOutputExpr(unsigned i) {
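A hedged example of the masm-to-AT&T prefix rewrite above (the instruction is arbitrary):

    void f() {
      __asm {
        vex vpaddd ymm0, ymm1, ymm2   // MS-style "vex" prefix
      }
    }
    // generateAsmString() turns the line into "{vex} vpaddd ymm0, ymm1, ymm2",
    // the brace form the AT&T-flavored backend understands.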
@@ -827,9 +907,9 @@ void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
}
IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
- Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then,
- SourceLocation EL, Stmt *Else)
- : Stmt(IfStmtClass) {
+ Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LPL,
+ SourceLocation RPL, Stmt *Then, SourceLocation EL, Stmt *Else)
+ : Stmt(IfStmtClass), LParenLoc(LPL), RParenLoc(RPL) {
bool HasElse = Else != nullptr;
bool HasVar = Var != nullptr;
bool HasInit = Init != nullptr;
@@ -862,7 +942,8 @@ IfStmt::IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit)
IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
- Stmt *Then, SourceLocation EL, Stmt *Else) {
+ SourceLocation LPL, SourceLocation RPL, Stmt *Then,
+ SourceLocation EL, Stmt *Else) {
bool HasElse = Else != nullptr;
bool HasVar = Var != nullptr;
bool HasInit = Init != nullptr;
@@ -871,7 +952,7 @@ IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL,
NumMandatoryStmtPtr + HasElse + HasVar + HasInit, HasElse),
alignof(IfStmt));
return new (Mem)
- IfStmt(Ctx, IL, IsConstexpr, Init, Var, Cond, Then, EL, Else);
+ IfStmt(Ctx, IL, IsConstexpr, Init, Var, Cond, LPL, RPL, Then, EL, Else);
}
IfStmt *IfStmt::CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
@@ -947,8 +1028,10 @@ void ForStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
}
SwitchStmt::SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
- Expr *Cond)
- : Stmt(SwitchStmtClass), FirstCase(nullptr) {
+ Expr *Cond, SourceLocation LParenLoc,
+ SourceLocation RParenLoc)
+ : Stmt(SwitchStmtClass), FirstCase(nullptr), LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc) {
bool HasInit = Init != nullptr;
bool HasVar = Var != nullptr;
SwitchStmtBits.HasInit = HasInit;
@@ -973,13 +1056,14 @@ SwitchStmt::SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar)
}
SwitchStmt *SwitchStmt::Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
- Expr *Cond) {
+ Expr *Cond, SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
bool HasInit = Init != nullptr;
bool HasVar = Var != nullptr;
void *Mem = Ctx.Allocate(
totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasInit + HasVar),
alignof(SwitchStmt));
- return new (Mem) SwitchStmt(Ctx, Init, Var, Cond);
+ return new (Mem) SwitchStmt(Ctx, Init, Var, Cond, LParenLoc, RParenLoc);
}
SwitchStmt *SwitchStmt::CreateEmpty(const ASTContext &Ctx, bool HasInit,
diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp
index 788fac789270..c858261f2387 100644
--- a/clang/lib/AST/StmtOpenMP.cpp
+++ b/clang/lib/AST/StmtOpenMP.cpp
@@ -16,10 +16,43 @@
using namespace clang;
using namespace llvm::omp;
-void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
- assert(Clauses.size() == getNumClauses() &&
+size_t OMPChildren::size(unsigned NumClauses, bool HasAssociatedStmt,
+ unsigned NumChildren) {
+ return llvm::alignTo(
+ totalSizeToAlloc<OMPClause *, Stmt *>(
+ NumClauses, NumChildren + (HasAssociatedStmt ? 1 : 0)),
+ alignof(OMPChildren));
+}
+
+void OMPChildren::setClauses(ArrayRef<OMPClause *> Clauses) {
+ assert(Clauses.size() == NumClauses &&
"Number of clauses is not the same as the preallocated buffer");
- std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
+ llvm::copy(Clauses, getTrailingObjects<OMPClause *>());
+}
+
+MutableArrayRef<Stmt *> OMPChildren::getChildren() {
+ return llvm::makeMutableArrayRef(getTrailingObjects<Stmt *>(), NumChildren);
+}
+
+OMPChildren *OMPChildren::Create(void *Mem, ArrayRef<OMPClause *> Clauses) {
+ auto *Data = CreateEmpty(Mem, Clauses.size());
+ Data->setClauses(Clauses);
+ return Data;
+}
+
+OMPChildren *OMPChildren::Create(void *Mem, ArrayRef<OMPClause *> Clauses,
+ Stmt *S, unsigned NumChildren) {
+ auto *Data = CreateEmpty(Mem, Clauses.size(), S, NumChildren);
+ Data->setClauses(Clauses);
+ if (S)
+ Data->setAssociatedStmt(S);
+ return Data;
+}
+
+OMPChildren *OMPChildren::CreateEmpty(void *Mem, unsigned NumClauses,
+ bool HasAssociatedStmt,
+ unsigned NumChildren) {
+ return new (Mem) OMPChildren(NumClauses, NumChildren, HasAssociatedStmt);
}
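
OMPChildren::size() above pins down a single trailing-objects layout: all OMPClause * slots first, then the Stmt * slots, with the associated statement (when present) occupying the extra, final Stmt * position — getChildren() deliberately returns only the first NumChildren entries. A sketch of the presumed layout, assuming OMPChildren derives from llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *>:

  // Presumed layout for NumClauses = 2, NumChildren = 1, HasAssociatedStmt:
  //
  //   [ OMPChildren fields ]
  //   [ OMPClause * #0     ]  <- getTrailingObjects<OMPClause *>()
  //   [ OMPClause * #1     ]
  //   [ Stmt * child #0    ]  <- getChildren()[0]
  //   [ Stmt * assoc. stmt ]  <- the slot added by HasAssociatedStmt
  //
  // size() rounds the total up to alignof(OMPChildren), presumably so that
  // storage co-allocated around this block stays suitably aligned.
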
bool OMPExecutableDirective::isStandaloneDirective() const {
@@ -30,15 +63,15 @@ bool OMPExecutableDirective::isStandaloneDirective() const {
isa<OMPTargetExitDataDirective>(this) ||
isa<OMPTargetUpdateDirective>(this))
return true;
- return !hasAssociatedStmt() || !getAssociatedStmt();
+ return !hasAssociatedStmt();
}
-const Stmt *OMPExecutableDirective::getStructuredBlock() const {
+Stmt *OMPExecutableDirective::getStructuredBlock() {
assert(!isStandaloneDirective() &&
"Standalone Executable Directives don't have Structured Blocks.");
if (auto *LD = dyn_cast<OMPLoopDirective>(this))
return LD->getBody();
- return getInnermostCapturedStmt()->getCapturedStmt();
+ return getRawStmt();
}
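
getRawStmt() is new with this rework and is not shown in this hunk, but judging by its call sites it returns the associated statement with any CapturedStmt wrappers stripped — which is why the getInnermostCapturedStmt()->getCapturedStmt() chains disappear here and in OMPLoopDirective::getBody() below. A presumed shape, assuming the directive simply forwards to its OMPChildren block:

  // Hypothetical sketch; the real accessor lives in the headers.
  Stmt *OMPChildren::getRawStmt() {
    Stmt *S = getAssociatedStmt();
    while (auto *CS = dyn_cast<CapturedStmt>(S))
      S = CS->getCapturedStmt();
    return S;
  }
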
Stmt *OMPLoopDirective::tryToFindNextInnerLoop(Stmt *CurStmt,
@@ -87,8 +120,7 @@ Stmt *OMPLoopDirective::tryToFindNextInnerLoop(Stmt *CurStmt,
Stmt *OMPLoopDirective::getBody() {
// This relies on the loop form having already been checked by Sema.
- Stmt *Body =
- getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
+ Stmt *Body = Data->getRawStmt()->IgnoreContainers();
if (auto *For = dyn_cast<ForStmt>(Body)) {
Body = For->getBody();
} else {
@@ -112,32 +144,32 @@ Stmt *OMPLoopDirective::getBody() {
void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of loop counters is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getCounters().begin());
+ llvm::copy(A, getCounters().begin());
}
void OMPLoopDirective::setPrivateCounters(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() && "Number of loop private counters "
"is not the same as the collapsed "
"number");
- std::copy(A.begin(), A.end(), getPrivateCounters().begin());
+ llvm::copy(A, getPrivateCounters().begin());
}
void OMPLoopDirective::setInits(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of counter inits is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getInits().begin());
+ llvm::copy(A, getInits().begin());
}
void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of counter updates is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getUpdates().begin());
+ llvm::copy(A, getUpdates().begin());
}
void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of counter finals is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getFinals().begin());
+ llvm::copy(A, getFinals().begin());
}
void OMPLoopDirective::setDependentCounters(ArrayRef<Expr *> A) {
@@ -163,14 +195,8 @@ OMPParallelDirective *OMPParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPParallelDirective *Dir =
- new (Mem) OMPParallelDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPParallelDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
@@ -179,11 +205,9 @@ OMPParallelDirective *OMPParallelDirective::Create(
OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPParallelDirective(NumClauses);
+ return createEmptyDirective<OMPParallelDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true,
+ /*NumChildren=*/1);
}
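
createDirective and createEmptyDirective are the helpers the rest of this file now funnels through; their definitions are in the class, outside this diff. A plausible sketch of the non-empty variant, assuming the directive object is co-allocated in front of its OMPChildren block and keeps the Data pointer seen elsewhere in this file — a sketch under those assumptions, not the actual implementation:

  // Hypothetical sketch only; the real helper lives in the
  // OMPExecutableDirective class definition.
  template <typename T, typename... Params>
  static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
                            Stmt *AssociatedStmt, unsigned NumChildren,
                            Params &&...P) {
    size_t SizeOfT = llvm::alignTo(sizeof(T), alignof(OMPChildren));
    void *Mem = C.Allocate(SizeOfT + OMPChildren::size(Clauses.size(),
                                                       AssociatedStmt != nullptr,
                                                       NumChildren),
                           alignof(T));
    auto *Data =
        OMPChildren::Create(reinterpret_cast<char *>(Mem) + SizeOfT, Clauses,
                            AssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data; // the directive's view of its clauses and children
    return Inst;
  }

createEmptyDirective presumably does the same with OMPChildren::CreateEmpty and the directive's empty-shell constructor, which is why every CreateEmpty below shrinks to a single call.
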
OMPSimdDirective *
@@ -191,14 +215,9 @@ OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPSimdDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
- OMPSimdDirective *Dir = new (Mem)
- OMPSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPSimdDirective>(
+ C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_simd),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -222,25 +241,18 @@ OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPSimdDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
- return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_simd), CollapsedNum);
}
OMPForDirective *OMPForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPForDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
- OMPForDirective *Dir =
- new (Mem) OMPForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPForDirective>(
+ C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_for) + 1,
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -274,11 +286,9 @@ OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPForDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
- return new (Mem) OMPForDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPForDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_for) + 1, CollapsedNum);
}
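
The + 1 on the child count here (and on the other non-simd worksharing directives in this file) presumably reserves the slot for the task-reduction reference installed via setTaskReductionRefExpr(); the simd variants, which take no TaskRedRef, allocate exactly numLoopChildren(...) children.

  // Presumed Stmt * slot assignment for '#pragma omp for' with collapse(N):
  //   [0 .. numLoopChildren(N, OMPD_for) - 1]  loop helper expressions
  //   [numLoopChildren(N, OMPD_for)]           task-reduction ref (the "+ 1")
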
OMPForSimdDirective *
@@ -286,15 +296,9 @@ OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPForSimdDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
- OMPForSimdDirective *Dir = new (Mem)
- OMPForSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPForSimdDirective>(
+ C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_for_simd),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -326,26 +330,18 @@ OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPForSimdDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
- return new (Mem) OMPForSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPForSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_for_simd), CollapsedNum);
}
OMPSectionsDirective *OMPSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSectionsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPSectionsDirective *Dir =
- new (Mem) OMPSectionsDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPSectionsDirective>(C, Clauses, AssociatedStmt,
+ /*NumChildren=*/1, StartLoc,
+ EndLoc);
Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
@@ -354,11 +350,9 @@ OMPSectionsDirective *OMPSectionsDirective::Create(
OMPSectionsDirective *OMPSectionsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSectionsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPSectionsDirective(NumClauses);
+ return createEmptyDirective<OMPSectionsDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true,
+ /*NumChildren=*/1);
}
OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
@@ -366,19 +360,17 @@ OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
SourceLocation EndLoc,
Stmt *AssociatedStmt,
bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPSectionDirective), alignof(Stmt *));
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- OMPSectionDirective *Dir = new (Mem) OMPSectionDirective(StartLoc, EndLoc);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir =
+ createDirective<OMPSectionDirective>(C, llvm::None, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc, EndLoc);
Dir->setHasCancel(HasCancel);
return Dir;
}
OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPSectionDirective), alignof(Stmt *));
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- return new (Mem) OMPSectionDirective();
+ return createEmptyDirective<OMPSectionDirective>(C, /*NumClauses=*/0,
+ /*HasAssociatedStmt=*/true);
}
OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
@@ -386,83 +378,57 @@ OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSingleDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPSingleDirective *Dir =
- new (Mem) OMPSingleDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPSingleDirective>(C, Clauses, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc,
+ EndLoc);
}
OMPSingleDirective *OMPSingleDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSingleDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPSingleDirective(NumClauses);
+ return createEmptyDirective<OMPSingleDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true);
}
OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
- unsigned Size = llvm::alignTo(sizeof(OMPMasterDirective), alignof(Stmt *));
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- OMPMasterDirective *Dir = new (Mem) OMPMasterDirective(StartLoc, EndLoc);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPMasterDirective>(C, llvm::None, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc,
+ EndLoc);
}
OMPMasterDirective *OMPMasterDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPMasterDirective), alignof(Stmt *));
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- return new (Mem) OMPMasterDirective();
+ return createEmptyDirective<OMPMasterDirective>(C, /*NumClauses=*/0,
+ /*HasAssociatedStmt=*/true);
}
OMPCriticalDirective *OMPCriticalDirective::Create(
const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPCriticalDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPCriticalDirective *Dir =
- new (Mem) OMPCriticalDirective(Name, StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPCriticalDirective>(C, Clauses, AssociatedStmt,
+ /*NumChildren=*/0, Name,
+ StartLoc, EndLoc);
}
OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPCriticalDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPCriticalDirective(NumClauses);
+ return createEmptyDirective<OMPCriticalDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true);
}
OMPParallelForDirective *OMPParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelForDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_for));
- OMPParallelForDirective *Dir = new (Mem)
- OMPParallelForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPParallelForDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_for) + 1, StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -495,27 +461,19 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
OMPParallelForDirective *
OMPParallelForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelForDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_for));
- return new (Mem) OMPParallelForDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPParallelForDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_for) + 1, CollapsedNum);
}
OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelForSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
- OMPParallelForSimdDirective *Dir = new (Mem) OMPParallelForSimdDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPParallelForSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_for_simd), StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -547,51 +505,33 @@ OMPParallelForSimdDirective *
OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelForSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
- return new (Mem) OMPParallelForSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPParallelForSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_for_simd), CollapsedNum);
}
OMPParallelMasterDirective *OMPParallelMasterDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelMasterDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- auto *Dir =
- new (Mem) OMPParallelMasterDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPParallelMasterDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
Dir->setTaskReductionRefExpr(TaskRedRef);
return Dir;
}
-OMPParallelMasterDirective *OMPParallelMasterDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelMasterDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPParallelMasterDirective(NumClauses);
+OMPParallelMasterDirective *
+OMPParallelMasterDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell) {
+ return createEmptyDirective<OMPParallelMasterDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
}
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelSectionsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPParallelSectionsDirective *Dir =
- new (Mem) OMPParallelSectionsDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPParallelSectionsDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
@@ -600,24 +540,16 @@ OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
OMPParallelSectionsDirective *
OMPParallelSectionsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPParallelSectionsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPParallelSectionsDirective(NumClauses);
+ return createEmptyDirective<OMPParallelSectionsDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
}
OMPTaskDirective *
OMPTaskDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPTaskDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTaskDirective *Dir =
- new (Mem) OMPTaskDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTaskDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -625,111 +557,79 @@ OMPTaskDirective::Create(const ASTContext &C, SourceLocation StartLoc,
OMPTaskDirective *OMPTaskDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTaskDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTaskDirective(NumClauses);
+ return createEmptyDirective<OMPTaskDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true);
}
OMPTaskyieldDirective *OMPTaskyieldDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
- OMPTaskyieldDirective *Dir =
- new (Mem) OMPTaskyieldDirective(StartLoc, EndLoc);
- return Dir;
+ return new (C) OMPTaskyieldDirective(StartLoc, EndLoc);
}
OMPTaskyieldDirective *OMPTaskyieldDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
- return new (Mem) OMPTaskyieldDirective();
+ return new (C) OMPTaskyieldDirective();
}
OMPBarrierDirective *OMPBarrierDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
- OMPBarrierDirective *Dir = new (Mem) OMPBarrierDirective(StartLoc, EndLoc);
- return Dir;
+ return new (C) OMPBarrierDirective(StartLoc, EndLoc);
}
OMPBarrierDirective *OMPBarrierDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
- return new (Mem) OMPBarrierDirective();
+ return new (C) OMPBarrierDirective();
}
OMPTaskwaitDirective *OMPTaskwaitDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
- OMPTaskwaitDirective *Dir = new (Mem) OMPTaskwaitDirective(StartLoc, EndLoc);
- return Dir;
+ return new (C) OMPTaskwaitDirective(StartLoc, EndLoc);
}
OMPTaskwaitDirective *OMPTaskwaitDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
- return new (Mem) OMPTaskwaitDirective();
+ return new (C) OMPTaskwaitDirective();
}
OMPTaskgroupDirective *OMPTaskgroupDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef) {
- unsigned Size = llvm::alignTo(sizeof(OMPTaskgroupDirective) +
- sizeof(OMPClause *) * Clauses.size(),
- alignof(Stmt *));
- void *Mem = C.Allocate(Size + sizeof(Stmt *) + sizeof(Expr *));
- OMPTaskgroupDirective *Dir =
- new (Mem) OMPTaskgroupDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTaskgroupDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
Dir->setReductionRef(ReductionRef);
- Dir->setClauses(Clauses);
return Dir;
}
OMPTaskgroupDirective *OMPTaskgroupDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTaskgroupDirective) +
- sizeof(OMPClause *) * NumClauses,
- alignof(Stmt *));
- void *Mem = C.Allocate(Size + sizeof(Stmt *) + sizeof(Expr *));
- return new (Mem) OMPTaskgroupDirective(NumClauses);
+ return createEmptyDirective<OMPTaskgroupDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
}
OMPCancellationPointDirective *OMPCancellationPointDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPCancellationPointDirective), alignof(Stmt *));
- void *Mem = C.Allocate(Size);
- OMPCancellationPointDirective *Dir =
- new (Mem) OMPCancellationPointDirective(StartLoc, EndLoc);
+ auto *Dir = new (C) OMPCancellationPointDirective(StartLoc, EndLoc);
Dir->setCancelRegion(CancelRegion);
return Dir;
}
OMPCancellationPointDirective *
OMPCancellationPointDirective::CreateEmpty(const ASTContext &C, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPCancellationPointDirective), alignof(Stmt *));
- void *Mem = C.Allocate(Size);
- return new (Mem) OMPCancellationPointDirective();
+ return new (C) OMPCancellationPointDirective();
}
OMPCancelDirective *
OMPCancelDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
OpenMPDirectiveKind CancelRegion) {
- unsigned Size = llvm::alignTo(sizeof(OMPCancelDirective) +
- sizeof(OMPClause *) * Clauses.size(),
- alignof(Stmt *));
- void *Mem = C.Allocate(Size);
- OMPCancelDirective *Dir =
- new (Mem) OMPCancelDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
+ auto *Dir = createDirective<OMPCancelDirective>(
+ C, Clauses, /*AssociatedStmt=*/nullptr, /*NumChildren=*/0, StartLoc,
+ EndLoc);
Dir->setCancelRegion(CancelRegion);
return Dir;
}
@@ -737,77 +637,52 @@ OMPCancelDirective::Create(const ASTContext &C, SourceLocation StartLoc,
OMPCancelDirective *OMPCancelDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPCancelDirective) +
- sizeof(OMPClause *) * NumClauses,
- alignof(Stmt *));
- void *Mem = C.Allocate(Size);
- return new (Mem) OMPCancelDirective(NumClauses);
+ return createEmptyDirective<OMPCancelDirective>(C, NumClauses);
}
OMPFlushDirective *OMPFlushDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPFlushDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size());
- OMPFlushDirective *Dir =
- new (Mem) OMPFlushDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- return Dir;
+ return createDirective<OMPFlushDirective>(
+ C, Clauses, /*AssociatedStmt=*/nullptr, /*NumChildren=*/0, StartLoc,
+ EndLoc);
}
OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPFlushDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses);
- return new (Mem) OMPFlushDirective(NumClauses);
+ return createEmptyDirective<OMPFlushDirective>(C, NumClauses);
}
OMPDepobjDirective *OMPDepobjDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPDepobjDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size(),
- alignof(OMPDepobjDirective));
- auto *Dir = new (Mem) OMPDepobjDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- return Dir;
+ return createDirective<OMPDepobjDirective>(
+ C, Clauses, /*AssociatedStmt=*/nullptr,
+ /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPDepobjDirective *OMPDepobjDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPDepobjDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses,
- alignof(OMPDepobjDirective));
- return new (Mem) OMPDepobjDirective(NumClauses);
+ return createEmptyDirective<OMPDepobjDirective>(C, NumClauses);
}
OMPScanDirective *OMPScanDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
- unsigned Size = llvm::alignTo(sizeof(OMPScanDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size(),
- alignof(OMPScanDirective));
- auto *Dir = new (Mem) OMPScanDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- return Dir;
+ return createDirective<OMPScanDirective>(C, Clauses,
+ /*AssociatedStmt=*/nullptr,
+ /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPScanDirective *OMPScanDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPScanDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses,
- alignof(OMPScanDirective));
- return new (Mem) OMPScanDirective(NumClauses);
+ return createEmptyDirective<OMPScanDirective>(C, NumClauses);
}
OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
@@ -815,39 +690,25 @@ OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPOrderedDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * Clauses.size());
- OMPOrderedDirective *Dir =
- new (Mem) OMPOrderedDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPOrderedDirective>(
+ C, Clauses, cast_or_null<CapturedStmt>(AssociatedStmt),
+ /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
+ bool IsStandalone,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPOrderedDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * NumClauses);
- return new (Mem) OMPOrderedDirective(NumClauses);
+ return createEmptyDirective<OMPOrderedDirective>(C, NumClauses,
+ !IsStandalone);
}
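
The new IsStandalone parameter maps onto HasAssociatedStmt through the negation: an `ordered` directive with a `depend` clause is standalone and carries no associated statement, so deserialization has to know up front whether to allocate that slot.
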
OMPAtomicDirective *OMPAtomicDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPAtomicDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- 5 * sizeof(Stmt *));
- OMPAtomicDirective *Dir =
- new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPAtomicDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/4, StartLoc, EndLoc);
Dir->setX(X);
Dir->setV(V);
Dir->setExpr(E);
@@ -860,11 +721,8 @@ OMPAtomicDirective *OMPAtomicDirective::Create(
OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPAtomicDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 5 * sizeof(Stmt *));
- return new (Mem) OMPAtomicDirective(NumClauses);
+ return createEmptyDirective<OMPAtomicDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/4);
}
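
The hand-counted 5 * sizeof(Stmt *) becomes /*NumChildren=*/4 because the associated statement now occupies OMPChildren's dedicated slot; the four remaining children presumably hold the atomic operands wired up in Create above:

  // Presumed Stmt * slots for an atomic directive:
  //   [0] X   -- setX()
  //   [1] V   -- setV()
  //   [2] E   -- setExpr()
  //   [3] UE  -- the update expression (setter not shown in this hunk)
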
OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
@@ -872,39 +730,23 @@ OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetDirective *Dir =
- new (Mem) OMPTargetDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPTargetDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTargetDirective(NumClauses);
+ return createEmptyDirective<OMPTargetDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true);
}
OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetParallelDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetParallelDirective *Dir =
- new (Mem) OMPTargetParallelDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetParallelDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
@@ -913,26 +755,18 @@ OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
OMPTargetParallelDirective *
OMPTargetParallelDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetParallelDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTargetParallelDirective(NumClauses);
+ return createEmptyDirective<OMPTargetParallelDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
}
OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_target_parallel_for));
- OMPTargetParallelForDirective *Dir = new (Mem) OMPTargetParallelForDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetParallelForDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_for) + 1, StartLoc,
+ EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -966,78 +800,52 @@ OMPTargetParallelForDirective *
OMPTargetParallelForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_target_parallel_for));
- return new (Mem) OMPTargetParallelForDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTargetParallelForDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_for) + 1,
+ CollapsedNum);
}
OMPTargetDataDirective *OMPTargetDataDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- void *Mem = C.Allocate(
- llvm::alignTo(sizeof(OMPTargetDataDirective), alignof(OMPClause *)) +
- sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetDataDirective *Dir =
- new (Mem) OMPTargetDataDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPTargetDataDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPTargetDataDirective *OMPTargetDataDirective::CreateEmpty(const ASTContext &C,
unsigned N,
EmptyShell) {
- void *Mem = C.Allocate(
- llvm::alignTo(sizeof(OMPTargetDataDirective), alignof(OMPClause *)) +
- sizeof(OMPClause *) * N + sizeof(Stmt *));
- return new (Mem) OMPTargetDataDirective(N);
+ return createEmptyDirective<OMPTargetDataDirective>(
+ C, N, /*HasAssociatedStmt=*/true);
}
OMPTargetEnterDataDirective *OMPTargetEnterDataDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- void *Mem = C.Allocate(
- llvm::alignTo(sizeof(OMPTargetEnterDataDirective), alignof(OMPClause *)) +
- sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetEnterDataDirective *Dir =
- new (Mem) OMPTargetEnterDataDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPTargetEnterDataDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPTargetEnterDataDirective *
OMPTargetEnterDataDirective::CreateEmpty(const ASTContext &C, unsigned N,
EmptyShell) {
- void *Mem = C.Allocate(
- llvm::alignTo(sizeof(OMPTargetEnterDataDirective), alignof(OMPClause *)) +
- sizeof(OMPClause *) * N + sizeof(Stmt *));
- return new (Mem) OMPTargetEnterDataDirective(N);
+ return createEmptyDirective<OMPTargetEnterDataDirective>(
+ C, N, /*HasAssociatedStmt=*/true);
}
OMPTargetExitDataDirective *OMPTargetExitDataDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- void *Mem = C.Allocate(
- llvm::alignTo(sizeof(OMPTargetExitDataDirective), alignof(OMPClause *)) +
- sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetExitDataDirective *Dir =
- new (Mem) OMPTargetExitDataDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPTargetExitDataDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPTargetExitDataDirective *
OMPTargetExitDataDirective::CreateEmpty(const ASTContext &C, unsigned N,
EmptyShell) {
- void *Mem = C.Allocate(
- llvm::alignTo(sizeof(OMPTargetExitDataDirective), alignof(OMPClause *)) +
- sizeof(OMPClause *) * N + sizeof(Stmt *));
- return new (Mem) OMPTargetExitDataDirective(N);
+ return createEmptyDirective<OMPTargetExitDataDirective>(
+ C, N, /*HasAssociatedStmt=*/true);
}
OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
@@ -1045,40 +853,24 @@ OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTeamsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTeamsDirective *Dir =
- new (Mem) OMPTeamsDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPTeamsDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc);
}
OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTeamsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTeamsDirective(NumClauses);
+ return createEmptyDirective<OMPTeamsDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true);
}
OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskLoopDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
- OMPTaskLoopDirective *Dir = new (Mem)
- OMPTaskLoopDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTaskLoopDirective>(
+ C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_taskloop),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1111,27 +903,19 @@ OMPTaskLoopDirective *OMPTaskLoopDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskLoopDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
- return new (Mem) OMPTaskLoopDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTaskLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_taskloop), CollapsedNum);
}
OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskLoopSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
- OMPTaskLoopSimdDirective *Dir = new (Mem)
- OMPTaskLoopSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTaskLoopSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_taskloop_simd), StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1162,27 +946,19 @@ OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
OMPTaskLoopSimdDirective *
OMPTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskLoopSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
- return new (Mem) OMPTaskLoopSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTaskLoopSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_taskloop_simd), CollapsedNum);
}
OMPMasterTaskLoopDirective *OMPMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPMasterTaskLoopDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_master_taskloop));
- OMPMasterTaskLoopDirective *Dir = new (Mem) OMPMasterTaskLoopDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPMasterTaskLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_master_taskloop), StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1215,28 +991,19 @@ OMPMasterTaskLoopDirective *
OMPMasterTaskLoopDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPMasterTaskLoopDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_master_taskloop));
- return new (Mem) OMPMasterTaskLoopDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPMasterTaskLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_master_taskloop), CollapsedNum);
}
OMPMasterTaskLoopSimdDirective *OMPMasterTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPMasterTaskLoopSimdDirective),
- alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_master_taskloop_simd));
- auto *Dir = new (Mem) OMPMasterTaskLoopSimdDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPMasterTaskLoopSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_master_taskloop_simd), StartLoc,
+ EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1268,29 +1035,19 @@ OMPMasterTaskLoopSimdDirective *
OMPMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPMasterTaskLoopSimdDirective),
- alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_master_taskloop_simd));
- return new (Mem) OMPMasterTaskLoopSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPMasterTaskLoopSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_master_taskloop_simd), CollapsedNum);
}
OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelMasterTaskLoopDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop));
- auto *Dir = new (Mem) OMPParallelMasterTaskLoopDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPParallelMasterTaskLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop), StartLoc,
+ EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1324,13 +1081,10 @@ OMPParallelMasterTaskLoopDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelMasterTaskLoopDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop));
- return new (Mem) OMPParallelMasterTaskLoopDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPParallelMasterTaskLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop),
+ CollapsedNum);
}
OMPParallelMasterTaskLoopSimdDirective *
@@ -1338,16 +1092,10 @@ OMPParallelMasterTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelMasterTaskLoopSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop_simd));
- auto *Dir = new (Mem) OMPParallelMasterTaskLoopSimdDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPParallelMasterTaskLoopSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop_simd),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1380,29 +1128,20 @@ OMPParallelMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelMasterTaskLoopSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop_simd));
- return new (Mem)
- OMPParallelMasterTaskLoopSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPParallelMasterTaskLoopSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_master_taskloop_simd),
+ CollapsedNum);
}
OMPDistributeDirective *OMPDistributeDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPDistributeDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute));
- OMPDistributeDirective *Dir = new (Mem)
- OMPDistributeDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPDistributeDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_distribute), StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1433,53 +1172,34 @@ OMPDistributeDirective *OMPDistributeDirective::Create(
OMPDistributeDirective *
OMPDistributeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPDistributeDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute));
- return new (Mem) OMPDistributeDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPDistributeDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_distribute), CollapsedNum);
}
OMPTargetUpdateDirective *OMPTargetUpdateDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetUpdateDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetUpdateDirective *Dir =
- new (Mem) OMPTargetUpdateDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPTargetUpdateDirective>(C, Clauses, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc,
+ EndLoc);
}
OMPTargetUpdateDirective *
OMPTargetUpdateDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetUpdateDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTargetUpdateDirective(NumClauses);
+ return createEmptyDirective<OMPTargetUpdateDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true);
}
OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for));
- OMPDistributeParallelForDirective *Dir =
- new (Mem) OMPDistributeParallelForDirective(StartLoc, EndLoc,
- CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPDistributeParallelForDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for) + 1, StartLoc,
+ EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1527,13 +1247,10 @@ OMPDistributeParallelForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for));
- return new (Mem) OMPDistributeParallelForDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPDistributeParallelForDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for) + 1,
+ CollapsedNum);
}
OMPDistributeParallelForSimdDirective *
@@ -1541,17 +1258,10 @@ OMPDistributeParallelForSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for_simd));
- OMPDistributeParallelForSimdDirective *Dir = new (Mem)
- OMPDistributeParallelForSimdDirective(StartLoc, EndLoc, CollapsedNum,
- Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPDistributeParallelForSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for_simd),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1597,30 +1307,20 @@ OMPDistributeParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for_simd));
- return new (Mem)
- OMPDistributeParallelForSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPDistributeParallelForSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_distribute_parallel_for_simd),
+ CollapsedNum);
}
OMPDistributeSimdDirective *OMPDistributeSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPDistributeSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute_simd));
- OMPDistributeSimdDirective *Dir = new (Mem) OMPDistributeSimdDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPDistributeSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_distribute_simd), StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1652,30 +1352,19 @@ OMPDistributeSimdDirective *
OMPDistributeSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPDistributeSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_distribute_simd));
- return new (Mem) OMPDistributeSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPDistributeSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_distribute_simd), CollapsedNum);
}
OMPTargetParallelForSimdDirective *OMPTargetParallelForSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_parallel_for_simd));
- OMPTargetParallelForSimdDirective *Dir =
- new (Mem) OMPTargetParallelForSimdDirective(StartLoc, EndLoc,
- CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetParallelForSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_for_simd), StartLoc,
+ EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1708,13 +1397,10 @@ OMPTargetParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_parallel_for_simd));
- return new (Mem) OMPTargetParallelForSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTargetParallelForSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_for_simd),
+ CollapsedNum);
}
OMPTargetSimdDirective *
@@ -1722,15 +1408,10 @@ OMPTargetSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_simd));
- OMPTargetSimdDirective *Dir = new (Mem)
- OMPTargetSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_simd), StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1753,27 +1434,19 @@ OMPTargetSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
OMPTargetSimdDirective *
OMPTargetSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTargetSimdDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_simd));
- return new (Mem) OMPTargetSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTargetSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_simd), CollapsedNum);
}
OMPTeamsDistributeDirective *OMPTeamsDistributeDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTeamsDistributeDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_teams_distribute));
- OMPTeamsDistributeDirective *Dir = new (Mem) OMPTeamsDistributeDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTeamsDistributeDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute), StartLoc, EndLoc,
+ CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1805,29 +1478,19 @@ OMPTeamsDistributeDirective *
OMPTeamsDistributeDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTeamsDistributeDirective), alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_teams_distribute));
- return new (Mem) OMPTeamsDistributeDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTeamsDistributeDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute), CollapsedNum);
}
OMPTeamsDistributeSimdDirective *OMPTeamsDistributeSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPTeamsDistributeSimdDirective),
- alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_teams_distribute_simd));
- OMPTeamsDistributeSimdDirective *Dir =
- new (Mem) OMPTeamsDistributeSimdDirective(StartLoc, EndLoc, CollapsedNum,
- Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTeamsDistributeSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_simd), StartLoc,
+ EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1858,13 +1521,9 @@ OMPTeamsDistributeSimdDirective *OMPTeamsDistributeSimdDirective::Create(
OMPTeamsDistributeSimdDirective *OMPTeamsDistributeSimdDirective::CreateEmpty(
const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTeamsDistributeSimdDirective),
- alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_teams_distribute_simd));
- return new (Mem) OMPTeamsDistributeSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTeamsDistributeSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_simd), CollapsedNum);
}
OMPTeamsDistributeParallelForSimdDirective *
@@ -1872,18 +1531,10 @@ OMPTeamsDistributeParallelForSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum,
- OMPD_teams_distribute_parallel_for_simd));
- OMPTeamsDistributeParallelForSimdDirective *Dir = new (Mem)
- OMPTeamsDistributeParallelForSimdDirective(StartLoc, EndLoc, CollapsedNum,
- Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTeamsDistributeParallelForSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for_simd),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -1929,15 +1580,10 @@ OMPTeamsDistributeParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum,
- OMPD_teams_distribute_parallel_for_simd));
- return new (Mem)
- OMPTeamsDistributeParallelForSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTeamsDistributeParallelForSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for_simd),
+ CollapsedNum);
}
OMPTeamsDistributeParallelForDirective *
@@ -1945,17 +1591,10 @@ OMPTeamsDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
- auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for));
- OMPTeamsDistributeParallelForDirective *Dir = new (Mem)
- OMPTeamsDistributeParallelForDirective(StartLoc, EndLoc, CollapsedNum,
- Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTeamsDistributeParallelForDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for) + 1,
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -2003,55 +1642,35 @@ OMPTeamsDistributeParallelForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for));
- return new (Mem)
- OMPTeamsDistributeParallelForDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTeamsDistributeParallelForDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for) + 1,
+ CollapsedNum);
}
OMPTargetTeamsDirective *OMPTargetTeamsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- auto Size =
- llvm::alignTo(sizeof(OMPTargetTeamsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetTeamsDirective *Dir =
- new (Mem) OMPTargetTeamsDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
+ return createDirective<OMPTargetTeamsDirective>(C, Clauses, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc,
+ EndLoc);
}
OMPTargetTeamsDirective *
OMPTargetTeamsDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell) {
- auto Size =
- llvm::alignTo(sizeof(OMPTargetTeamsDirective), alignof(OMPClause *));
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTargetTeamsDirective(NumClauses);
+ return createEmptyDirective<OMPTargetTeamsDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true);
}
OMPTargetTeamsDistributeDirective *OMPTargetTeamsDistributeDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- auto Size = llvm::alignTo(sizeof(OMPTargetTeamsDistributeDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_teams_distribute));
- OMPTargetTeamsDistributeDirective *Dir =
- new (Mem) OMPTargetTeamsDistributeDirective(StartLoc, EndLoc, CollapsedNum,
- Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetTeamsDistributeDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute), StartLoc,
+ EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -2084,13 +1703,10 @@ OMPTargetTeamsDistributeDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- auto Size = llvm::alignTo(sizeof(OMPTargetTeamsDistributeDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_teams_distribute));
- return new (Mem) OMPTargetTeamsDistributeDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTargetTeamsDistributeDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute),
+ CollapsedNum);
}
OMPTargetTeamsDistributeParallelForDirective *
@@ -2098,19 +1714,11 @@ OMPTargetTeamsDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
- auto Size =
- llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum,
- OMPD_target_teams_distribute_parallel_for));
- OMPTargetTeamsDistributeParallelForDirective *Dir =
- new (Mem) OMPTargetTeamsDistributeParallelForDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetTeamsDistributeParallelForDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute_parallel_for) +
+ 1,
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -2158,16 +1766,11 @@ OMPTargetTeamsDistributeParallelForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- auto Size =
- llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum,
- OMPD_target_teams_distribute_parallel_for));
- return new (Mem)
- OMPTargetTeamsDistributeParallelForDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTargetTeamsDistributeParallelForDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute_parallel_for) +
+ 1,
+ CollapsedNum);
}
OMPTargetTeamsDistributeParallelForSimdDirective *
@@ -2175,19 +1778,11 @@ OMPTargetTeamsDistributeParallelForSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- auto Size =
- llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum,
- OMPD_target_teams_distribute_parallel_for_simd));
- OMPTargetTeamsDistributeParallelForSimdDirective *Dir =
- new (Mem) OMPTargetTeamsDistributeParallelForSimdDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetTeamsDistributeParallelForSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum,
+ OMPD_target_teams_distribute_parallel_for_simd),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -2232,16 +1827,11 @@ OMPTargetTeamsDistributeParallelForSimdDirective *
OMPTargetTeamsDistributeParallelForSimdDirective::CreateEmpty(
const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell) {
- auto Size =
- llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum,
- OMPD_target_teams_distribute_parallel_for_simd));
- return new (Mem) OMPTargetTeamsDistributeParallelForSimdDirective(
- CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTargetTeamsDistributeParallelForSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum,
+ OMPD_target_teams_distribute_parallel_for_simd),
+ CollapsedNum);
}
OMPTargetTeamsDistributeSimdDirective *
@@ -2249,17 +1839,10 @@ OMPTargetTeamsDistributeSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- auto Size = llvm::alignTo(sizeof(OMPTargetTeamsDistributeSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_teams_distribute_simd));
- OMPTargetTeamsDistributeSimdDirective *Dir = new (Mem)
- OMPTargetTeamsDistributeSimdDirective(StartLoc, EndLoc, CollapsedNum,
- Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
+ auto *Dir = createDirective<OMPTargetTeamsDistributeSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute_simd),
+ StartLoc, EndLoc, CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
@@ -2292,12 +1875,8 @@ OMPTargetTeamsDistributeSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- auto Size = llvm::alignTo(sizeof(OMPTargetTeamsDistributeSimdDirective),
- alignof(OMPClause *));
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_target_teams_distribute_simd));
- return new (Mem)
- OMPTargetTeamsDistributeSimdDirective(CollapsedNum, NumClauses);
+ return createEmptyDirective<OMPTargetTeamsDistributeSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute_simd),
+ CollapsedNum);
}
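Throughout this file the Create/CreateEmpty pairs stay symmetric: Create builds a fully initialized node from parsed clauses and helper expressions, while CreateEmpty reserves identically sized storage for a shell that is filled in afterwards, typically during AST deserialization. A hypothetical deserialization-style call (argument values are placeholders):

    // Shell first, contents later; NumClauses and CollapsedNum would come
    // from the serialized form.
    OMPDistributeSimdDirective *D = OMPDistributeSimdDirective::CreateEmpty(
        Ctx, /*NumClauses=*/2, /*CollapsedNum=*/1, Stmt::EmptyShell());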
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index f797f5fe8e6d..55a721194ccf 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -647,7 +647,7 @@ void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S,
}
OS << NL;
if (!ForceNoStmt && S->hasAssociatedStmt())
- PrintStmt(S->getInnermostCapturedStmt()->getCapturedStmt());
+ PrintStmt(S->getRawStmt());
}
void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
@@ -970,6 +970,10 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
OCED->getInit()->IgnoreImpCasts()->printPretty(OS, nullptr, Policy);
return;
}
+ if (const auto *TPOD = dyn_cast<TemplateParamObjectDecl>(Node->getDecl())) {
+ TPOD->printAsExpr(OS);
+ return;
+ }
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
if (Node->hasTemplateKeyword())
@@ -2005,8 +2009,23 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
if (C->isPackExpansion())
OS << "...";
- if (Node->isInitCapture(C))
- PrintExpr(C->getCapturedVar()->getInit());
+ if (Node->isInitCapture(C)) {
+ VarDecl *D = C->getCapturedVar();
+
+ llvm::StringRef Pre;
+ llvm::StringRef Post;
+ if (D->getInitStyle() == VarDecl::CallInit &&
+ !isa<ParenListExpr>(D->getInit())) {
+ Pre = "(";
+ Post = ")";
+ } else if (D->getInitStyle() == VarDecl::CInit) {
+ Pre = " = ";
+ }
+
+ OS << Pre;
+ PrintExpr(D->getInit());
+ OS << Post;
+ }
}
OS << ']';
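The init-capture branch now reprints the initializer with the syntax it was written in rather than dumping the bare initializer: CInit gets " = ", CallInit is re-parenthesized unless the initializer is a ParenListExpr that already prints its own parentheses, and list-initialization needs no decoration. Expected round-trips (illustrative):

    auto a = [x = 42] {};   // VarDecl::CInit    -> prints back as "[x = 42]"
    auto b = [y(7)] {};     // VarDecl::CallInit -> prints back as "[y(7)]"
    auto c = [z{3}] {};     // list-init         -> prints back as "[z{3}]"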
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index bf3b43b816f1..de9de6ff463c 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -414,8 +414,9 @@ class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> {
public:
OMPClauseProfiler(StmtProfiler *P) : Profiler(P) { }
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(const Class *C);
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(const Class *C);
+#include "llvm/Frontend/OpenMP/OMP.inc"
void VistOMPClauseWithPreInit(const OMPClauseWithPreInit *C);
void VistOMPClauseWithPostUpdate(const OMPClauseWithPostUpdate *C);
};
@@ -2193,6 +2194,8 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
break;
case TemplateArgument::Declaration:
+ VisitType(Arg.getParamTypeForDecl());
+ // FIXME: Do we need to recursively decompose template parameter objects?
VisitDecl(Arg.getAsDecl());
break;
@@ -2201,8 +2204,8 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
break;
case TemplateArgument::Integral:
- Arg.getAsIntegral().Profile(ID);
VisitType(Arg.getIntegralType());
+ Arg.getAsIntegral().Profile(ID);
break;
case TemplateArgument::Expression:
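Two independent fixes land in this file: the clause-visitor declarations are now generated from the tablegen-driven OMP.inc instead of the retired OMPKinds.def macros, and template-argument profiling folds in the parameter's type for Declaration arguments, with Integral reordered to hash the type first, presumably to keep the ordering consistent across kinds. The include expands roughly to one declaration per clause class (assumed expansion):

    void VisitOMPIfClause(const OMPIfClause *C);
    void VisitOMPNumThreadsClause(const OMPNumThreadsClause *C);
    // ... one Visit##Class per clause listed in the OpenMP tablegen.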
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index 6a3d2b30e46e..baf62bd115a8 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -244,6 +244,7 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
break;
case Declaration:
+ getParamTypeForDecl().Profile(ID);
ID.AddPointer(getAsDecl()? getAsDecl()->getCanonicalDecl() : nullptr);
break;
@@ -288,11 +289,14 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
case Null:
case Type:
case Expression:
- case Template:
- case TemplateExpansion:
case NullPtr:
return TypeOrValue.V == Other.TypeOrValue.V;
+ case Template:
+ case TemplateExpansion:
+ return TemplateArg.Name == Other.TemplateArg.Name &&
+ TemplateArg.NumExpansions == Other.TemplateArg.NumExpansions;
+
case Declaration:
return getAsDecl() == Other.getAsDecl();
@@ -352,6 +356,13 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
case Declaration: {
NamedDecl *ND = getAsDecl();
+ if (getParamTypeForDecl()->isRecordType()) {
+ if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
+ // FIXME: Include the type if it's not obvious from the context.
+ TPO->printAsInit(Out);
+ break;
+ }
+ }
if (!getParamTypeForDecl()->isReferenceType())
Out << '&';
ND->printQualifiedName(Out);
@@ -448,8 +459,8 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
- const TemplateArgument &Arg) {
+template <typename T>
+static const T &DiagTemplateArg(const T &DB, const TemplateArgument &Arg) {
switch (Arg.getKind()) {
case TemplateArgument::Null:
// This is bad, but not as bad as crashing because of argument
@@ -502,6 +513,22 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
llvm_unreachable("Invalid TemplateArgument Kind!");
}
+const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
+ const TemplateArgument &Arg) {
+ return DiagTemplateArg(DB, Arg);
+}
+
+clang::TemplateArgumentLocInfo::TemplateArgumentLocInfo(
+ ASTContext &Ctx, NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateNameLoc, SourceLocation EllipsisLoc) {
+ TemplateTemplateArgLocInfo *Template = new (Ctx) TemplateTemplateArgLocInfo;
+ Template->Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Template->QualifierLocData = QualifierLoc.getOpaqueData();
+ Template->TemplateNameLoc = TemplateNameLoc;
+ Template->EllipsisLoc = EllipsisLoc;
+ Pointer = Template;
+}
+
const ASTTemplateArgumentListInfo *
ASTTemplateArgumentListInfo::Create(const ASTContext &C,
const TemplateArgumentListInfo &List) {
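The Profile and structurallyEquals changes matter for C++20 class-type non-type template parameters: the same declaration can back arguments of different parameter types, and template/template-expansion arguments are now compared by name and expansion count rather than by the raw union word. An illustrative case the type-aware hash keeps distinct:

    // Illustrative only: both arguments store the declaration N, so only
    // the parameter type distinguishes them; it now feeds the hash too.
    int N = 0;
    template <int &R> struct P {};        // parameter type: int &
    template <const int &R> struct Q {};  // parameter type: const int &
    P<N> p; Q<N> q;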
diff --git a/clang/lib/AST/TemplateName.cpp b/clang/lib/AST/TemplateName.cpp
index 40a8736ae1af..22cfa9acbe1b 100644
--- a/clang/lib/AST/TemplateName.cpp
+++ b/clang/lib/AST/TemplateName.cpp
@@ -254,8 +254,8 @@ TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
}
}
-const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
- TemplateName N) {
+const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
+ TemplateName N) {
std::string NameStr;
llvm::raw_string_ostream OS(NameStr);
LangOptions LO;
@@ -268,20 +268,6 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB << NameStr;
}
-const PartialDiagnostic&clang::operator<<(const PartialDiagnostic &PD,
- TemplateName N) {
- std::string NameStr;
- llvm::raw_string_ostream OS(NameStr);
- LangOptions LO;
- LO.CPlusPlus = true;
- LO.Bool = true;
- OS << '\'';
- N.print(OS, PrintingPolicy(LO));
- OS << '\'';
- OS.flush();
- return PD << NameStr;
-}
-
void TemplateName::dump(raw_ostream &OS) const {
LangOptions LO; // FIXME!
LO.CPlusPlus = true;
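The duplicated PartialDiagnostic overload disappears because both diagnostic builders are assumed to share the StreamingDiagnostic base after the diagnostics refactoring, so the single operator<< above serves both sinks; the DiagTemplateArg helper in TemplateBase.cpp plays the same trick for TemplateArgument. Hypothetical call sites, with err_example standing in for a real diagnostic ID:

    // Both paths land in the same StreamingDiagnostic operator (assumed
    // hierarchy; err_example is a placeholder, not a real ID).
    S.Diag(Loc, diag::err_example) << Name;   // via DiagnosticBuilder
    S.PDiag(diag::err_example) << Name;       // via PartialDiagnostic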
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 5b0a0ac392c0..e3132752546f 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -661,7 +661,7 @@ void TextNodeDumper::dumpBareDeclRef(const Decl *D) {
void TextNodeDumper::dumpName(const NamedDecl *ND) {
if (ND->getDeclName()) {
ColorScope Color(OS, ShowColors, DeclNameColor);
- OS << ' ' << ND->getNameAsString();
+ OS << ' ' << ND->getDeclName();
}
}
@@ -710,6 +710,13 @@ const char *TextNodeDumper::getCommandName(unsigned CommandID) {
return "<not a builtin command>";
}
+void TextNodeDumper::printFPOptions(FPOptionsOverride FPO) {
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ if (FPO.has##NAME##Override()) \
+ OS << " " #NAME "=" << FPO.get##NAME##Override();
+#include "clang/Basic/FPOptions.def"
+}
+
void TextNodeDumper::visitTextComment(const comments::TextComment *C,
const comments::FullComment *) {
OS << " Text=\"" << C->getText() << "\"";
@@ -937,6 +944,8 @@ void TextNodeDumper::VisitConstantExpr(const ConstantExpr *Node) {
void TextNodeDumper::VisitCallExpr(const CallExpr *Node) {
if (Node->usesADL())
OS << " adl";
+ if (Node->hasStoredFPFeatures())
+ printFPOptions(Node->getFPFeatures());
}
void TextNodeDumper::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *Node) {
@@ -955,6 +964,8 @@ void TextNodeDumper::VisitCastExpr(const CastExpr *Node) {
}
dumpBasePath(OS, Node);
OS << ">";
+ if (Node->hasStoredFPFeatures())
+ printFPOptions(Node->getFPFeatures());
}
void TextNodeDumper::VisitImplicitCastExpr(const ImplicitCastExpr *Node) {
@@ -1053,6 +1064,8 @@ void TextNodeDumper::VisitUnaryOperator(const UnaryOperator *Node) {
<< UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
if (!Node->canOverflow())
OS << " cannot overflow";
+ if (Node->hasStoredFPFeatures())
+ printFPOptions(Node->getStoredFPFeatures());
}
void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr(
@@ -1081,6 +1094,8 @@ void TextNodeDumper::VisitExtVectorElementExpr(
void TextNodeDumper::VisitBinaryOperator(const BinaryOperator *Node) {
OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+ if (Node->hasStoredFPFeatures())
+ printFPOptions(Node->getStoredFPFeatures());
}
void TextNodeDumper::VisitCompoundAssignOperator(
@@ -1090,6 +1105,8 @@ void TextNodeDumper::VisitCompoundAssignOperator(
dumpBareType(Node->getComputationLHSType());
OS << " ComputeResultTy=";
dumpBareType(Node->getComputationResultType());
+ if (Node->hasStoredFPFeatures())
+ printFPOptions(Node->getStoredFPFeatures());
}
void TextNodeDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
@@ -1119,6 +1136,14 @@ void TextNodeDumper::VisitCXXFunctionalCastExpr(
const CXXFunctionalCastExpr *Node) {
OS << " functional cast to " << Node->getTypeAsWritten().getAsString() << " <"
<< Node->getCastKindName() << ">";
+ if (Node->hasStoredFPFeatures())
+ printFPOptions(Node->getFPFeatures());
+}
+
+void TextNodeDumper::VisitCXXStaticCastExpr(const CXXStaticCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+ if (Node->hasStoredFPFeatures())
+ printFPOptions(Node->getFPFeatures());
}
void TextNodeDumper::VisitCXXUnresolvedConstructExpr(
@@ -1327,6 +1352,12 @@ void TextNodeDumper::VisitOMPIteratorExpr(const OMPIteratorExpr *Node) {
}
}
+void TextNodeDumper::VisitConceptSpecializationExpr(
+ const ConceptSpecializationExpr *Node) {
+ OS << " ";
+ dumpBareDeclRef(Node->getFoundDecl());
+}
+
void TextNodeDumper::VisitRValueReferenceType(const ReferenceType *T) {
if (T->isSpelledAsLValue())
OS << " written as lvalue reference";
@@ -1389,6 +1420,12 @@ void TextNodeDumper::VisitVectorType(const VectorType *T) {
case VectorType::NeonPolyVector:
OS << " neon poly";
break;
+ case VectorType::SveFixedLengthDataVector:
+ OS << " fixed-length sve data vector";
+ break;
+ case VectorType::SveFixedLengthPredicateVector:
+ OS << " fixed-length sve predicate vector";
+ break;
}
OS << " " << T->getNumElements();
}
@@ -1581,9 +1618,8 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (MD->size_overridden_methods() != 0) {
auto dumpOverride = [=](const CXXMethodDecl *D) {
SplitQualType T_split = D->getType().split();
- OS << D << " " << D->getParent()->getName()
- << "::" << D->getNameAsString() << " '"
- << QualType::getAsString(T_split, PrintPolicy) << "'";
+ OS << D << " " << D->getParent()->getName() << "::" << D->getDeclName()
+ << " '" << QualType::getAsString(T_split, PrintPolicy) << "'";
};
AddChild([=] {
@@ -1981,7 +2017,6 @@ void TextNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
dumpBareDeclRef(TC->getFoundDecl());
OS << ")";
}
- Visit(TC->getImmediatelyDeclaredConstraint());
} else if (D->wasDeclaredWithTypename())
OS << " typename";
else
@@ -2013,7 +2048,7 @@ void TextNodeDumper::VisitUsingDecl(const UsingDecl *D) {
OS << ' ';
if (D->getQualifier())
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
- OS << D->getNameAsString();
+ OS << D->getDeclName();
}
void TextNodeDumper::VisitUnresolvedUsingTypenameDecl(
@@ -2021,7 +2056,7 @@ void TextNodeDumper::VisitUnresolvedUsingTypenameDecl(
OS << ' ';
if (D->getQualifier())
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
- OS << D->getNameAsString();
+ OS << D->getDeclName();
}
void TextNodeDumper::VisitUnresolvedUsingValueDecl(
@@ -2029,7 +2064,7 @@ void TextNodeDumper::VisitUnresolvedUsingValueDecl(
OS << ' ';
if (D->getQualifier())
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
- OS << D->getNameAsString();
+ OS << D->getDeclName();
dumpType(D->getType());
}
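printFPOptions gives dumps of floating-point-sensitive nodes (calls, casts, unary and binary operators, compound assignments) a trailing list of whichever FP settings were explicitly overridden; the option names come straight from FPOptions.def. The getDeclName switches avoid materializing the temporary std::string that getNameAsString builds. An illustrative dump fragment (exact option spelling depends on the .def entries):

    // clang -Xclang -ast-dump on code with overridden FP state might show:
    // BinaryOperator 0x... 'float' '+' FPContractMode=1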
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 10a6a2610130..034e175f1352 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -308,10 +308,8 @@ ConstantMatrixType::ConstantMatrixType(QualType matrixType, unsigned nRows,
ConstantMatrixType::ConstantMatrixType(TypeClass tc, QualType matrixType,
unsigned nRows, unsigned nColumns,
QualType canonType)
- : MatrixType(tc, matrixType, canonType) {
- ConstantMatrixTypeBits.NumRows = nRows;
- ConstantMatrixTypeBits.NumColumns = nColumns;
-}
+ : MatrixType(tc, matrixType, canonType), NumRows(nRows),
+ NumColumns(nColumns) {}
DependentSizedMatrixType::DependentSizedMatrixType(
const ASTContext &CTX, QualType ElementType, QualType CanonicalType,
@@ -1187,9 +1185,6 @@ public:
T->getTypeConstraintArguments());
}
- // FIXME: Non-trivial to implement, but important for C++
- SUGARED_TYPE_CLASS(PackExpansion)
-
QualType VisitObjCObjectType(const ObjCObjectType *T) {
QualType baseType = recurse(T->getBaseType());
if (baseType.isNull())
@@ -2294,6 +2289,43 @@ bool Type::isSizelessBuiltinType() const {
bool Type::isSizelessType() const { return isSizelessBuiltinType(); }
+bool Type::isVLSTBuiltinType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::SveInt8:
+ case BuiltinType::SveInt16:
+ case BuiltinType::SveInt32:
+ case BuiltinType::SveInt64:
+ case BuiltinType::SveUint8:
+ case BuiltinType::SveUint16:
+ case BuiltinType::SveUint32:
+ case BuiltinType::SveUint64:
+ case BuiltinType::SveFloat16:
+ case BuiltinType::SveFloat32:
+ case BuiltinType::SveFloat64:
+ case BuiltinType::SveBFloat16:
+ case BuiltinType::SveBool:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+QualType Type::getSveEltType(const ASTContext &Ctx) const {
+ assert(isVLSTBuiltinType() && "unsupported type!");
+
+ const BuiltinType *BTy = getAs<BuiltinType>();
+ if (BTy->getKind() == BuiltinType::SveBool)
+ // Represent predicates as i8 rather than i1 to avoid any layout issues.
+ // The type is bitcasted to a scalable predicate type when casting between
+ // scalable and fixed-length vectors.
+ return Ctx.UnsignedCharTy;
+ else
+ return Ctx.getBuiltinVectorTypeInfo(BTy).ElementType;
+}
+
bool QualType::isPODType(const ASTContext &Context) const {
// C++11 has a more relaxed definition of POD.
if (Context.getLangOpts().CPlusPlus11)
@@ -2564,6 +2596,22 @@ bool Type::isLiteralType(const ASTContext &Ctx) const {
return false;
}
+bool Type::isStructuralType() const {
+ // C++20 [temp.param]p6:
+ // A structural type is one of the following:
+ // -- a scalar type; or
+ // -- a vector type [Clang extension]; or
+ if (isScalarType() || isVectorType())
+ return true;
+ // -- an lvalue reference type; or
+ if (isLValueReferenceType())
+ return true;
+ // -- a literal class type [...under some conditions]
+ if (const CXXRecordDecl *RD = getAsCXXRecordDecl())
+ return RD->isStructural();
+ return false;
+}
+
bool Type::isStandardLayoutType() const {
if (isDependentType())
return false;
@@ -3036,6 +3084,10 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case Id: \
return Name;
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case Id: \
+ return #Name;
+#include "clang/Basic/PPCTypes.def"
}
llvm_unreachable("Invalid builtin type.");
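The PPC_VECTOR_TYPE hook threads the new PowerPC MMA builtin types through this switch and the later ones in canHaveNullability and BuiltinTypeLoc. PPCTypes.def presumably carries entries along these lines, making the include expand to one case per type:

    // Assumed .def contents (names per the PowerPC MMA support):
    //   PPC_VECTOR_TYPE(__vector_quad, VectorQuad, 512)
    //   PPC_VECTOR_TYPE(__vector_pair, VectorPair, 256)
    // which expands above to:
    //   case VectorQuad: return "__vector_quad";
    //   case VectorPair: return "__vector_pair";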
@@ -3317,6 +3369,13 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
getExtProtoInfo(), Ctx, isCanonicalUnqualified());
}
+TypedefType::TypedefType(TypeClass tc, const TypedefNameDecl *D,
+ QualType underlying, QualType can)
+ : Type(tc, can, underlying->getDependence()),
+ Decl(const_cast<TypedefNameDecl *>(D)) {
+ assert(!isa<TypedefType>(can) && "Invalid canonical type");
+}
+
QualType TypedefType::desugar() const {
return getDecl()->getUnderlyingType();
}
@@ -3454,6 +3513,7 @@ bool AttributedType::isQualifier() const {
case attr::ObjCInertUnsafeUnretained:
case attr::TypeNonNull:
case attr::TypeNullable:
+ case attr::TypeNullableResult:
case attr::TypeNullUnspecified:
case attr::LifetimeBound:
case attr::AddressSpace:
@@ -3538,24 +3598,24 @@ void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(P.getAsType().getAsOpaquePtr());
}
-bool TemplateSpecializationType::
-anyDependentTemplateArguments(const TemplateArgumentListInfo &Args,
- bool &InstantiationDependent) {
- return anyDependentTemplateArguments(Args.arguments(),
- InstantiationDependent);
+bool TemplateSpecializationType::anyDependentTemplateArguments(
+ const TemplateArgumentListInfo &Args, ArrayRef<TemplateArgument> Converted) {
+ return anyDependentTemplateArguments(Args.arguments(), Converted);
}
-bool TemplateSpecializationType::
-anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
- bool &InstantiationDependent) {
- for (const TemplateArgumentLoc &ArgLoc : Args) {
- if (ArgLoc.getArgument().isDependent()) {
- InstantiationDependent = true;
+bool TemplateSpecializationType::anyDependentTemplateArguments(
+ ArrayRef<TemplateArgumentLoc> Args, ArrayRef<TemplateArgument> Converted) {
+ for (const TemplateArgument &Arg : Converted)
+ if (Arg.isDependent())
return true;
- }
+ return false;
+}
+bool TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
+ ArrayRef<TemplateArgumentLoc> Args) {
+ for (const TemplateArgumentLoc &ArgLoc : Args) {
if (ArgLoc.getArgument().isInstantiationDependent())
- InstantiationDependent = true;
+ return true;
}
return false;
}
@@ -4047,6 +4107,9 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
case BuiltinType::IncompleteMatrixIdx:
@@ -4098,6 +4161,8 @@ AttributedType::getImmediateNullability() const {
return NullabilityKind::Nullable;
if (getAttrKind() == attr::TypeNullUnspecified)
return NullabilityKind::Unspecified;
+ if (getAttrKind() == attr::TypeNullableResult)
+ return NullabilityKind::NullableResult;
return None;
}
@@ -4305,10 +4370,10 @@ CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
llvm::APSInt Val, unsigned Scale) {
- FixedPointSemantics FXSema(Val.getBitWidth(), Scale, Val.isSigned(),
- /*IsSaturated=*/false,
- /*HasUnsignedPadding=*/false);
- APFixedPoint(Val, FXSema).toString(Str);
+ llvm::FixedPointSemantics FXSema(Val.getBitWidth(), Scale, Val.isSigned(),
+ /*IsSaturated=*/false,
+ /*HasUnsignedPadding=*/false);
+ llvm::APFixedPoint(Val, FXSema).toString(Str);
}
AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
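isVLSTBuiltinType identifies the sizeless SVE builtins that can be given a fixed length via __arm_sve_vector_bits__ (VLST: vector-length-specific type), and getSveEltType maps such a type to the element type used for its fixed-length representation, with predicates deliberately widened to unsigned char. A hypothetical caller:

    // Sketch: choose the element type when lowering a fixed-length SVE
    // vector; Ty and Ctx are assumed to be in scope.
    if (Ty->isVLSTBuiltinType()) {
      QualType Elt = Ty->getSveEltType(Ctx); // svbool_t -> unsigned char
      (void)Elt;
    }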
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
index 57c11ca5571d..222b1abac510 100644
--- a/clang/lib/AST/TypeLoc.cpp
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -403,6 +403,9 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
@@ -582,7 +585,7 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
ArgInfos[i] = TemplateArgumentLocInfo(
- Builder.getWithLocInContext(Context), Loc,
+ Context, Builder.getWithLocInContext(Context), Loc,
Args[i].getKind() == TemplateArgument::Template ? SourceLocation()
: Loc);
break;
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 6f6932e65214..25d7874b53fb 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
@@ -19,6 +19,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -115,10 +116,13 @@ namespace {
static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
void spaceBeforePlaceHolder(raw_ostream &OS);
void printTypeSpec(NamedDecl *D, raw_ostream &OS);
+ void printTemplateId(const TemplateSpecializationType *T, raw_ostream &OS,
+ bool FullyQualify);
void printBefore(QualType T, raw_ostream &OS);
void printAfter(QualType T, raw_ostream &OS);
- void AppendScope(DeclContext *DC, raw_ostream &OS);
+ void AppendScope(DeclContext *DC, raw_ostream &OS,
+ DeclarationName NameInScope);
void printTag(TagDecl *T, raw_ostream &OS);
void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS);
#define ABSTRACT_TYPE(CLASS, PARENT)
@@ -655,6 +659,24 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
printBefore(T->getElementType(), OS);
break;
}
+ case VectorType::SveFixedLengthDataVector:
+ case VectorType::SveFixedLengthPredicateVector:
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__arm_sve_vector_bits__(";
+
+ if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ // Predicates take a bit per byte of the vector size, multiply by 8 to
+ // get the number of bits passed to the attribute.
+ OS << T->getNumElements() * 8;
+ else
+ OS << T->getNumElements();
+
+ OS << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ // Multiply by 8 for the number of bits.
+ OS << ") * 8))) ";
+ printBefore(T->getElementType(), OS);
}
}
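Since only the element count survives in the VectorType, the printer rebuilds the attribute operand as an expression that multiplies back to the original bit width; predicates get the extra factor of eight because they carry one bit per byte of the data vector. Worked example at 512 bits (element counts assumed):

    // svint32_t: 16 x int
    //   -> __arm_sve_vector_bits__(16 * sizeof(int) * 8)            == 512
    // svbool_t:   8 x unsigned char, count scaled by 8 first
    //   -> __arm_sve_vector_bits__(64 * sizeof(unsigned char) * 8)  == 512
    typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(512)));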
@@ -702,6 +724,24 @@ void TypePrinter::printDependentVectorBefore(
printBefore(T->getElementType(), OS);
break;
}
+ case VectorType::SveFixedLengthDataVector:
+ case VectorType::SveFixedLengthPredicateVector:
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__arm_sve_vector_bits__(";
+ if (T->getSizeExpr()) {
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ // Predicates take a bit per byte of the vector size, multiply by 8 to
+ // get the number of bits passed to the attribute.
+ OS << " * 8";
+ OS << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ // Multiply by 8 for the number of bits.
+ OS << ") * 8";
+ }
+ OS << "))) ";
+ printBefore(T->getElementType(), OS);
}
}
@@ -985,7 +1025,7 @@ void TypePrinter::printTypeSpec(NamedDecl *D, raw_ostream &OS) {
// In C, this will always be empty except when the type
// being printed is anonymous within other Record.
if (!Policy.SuppressScope)
- AppendScope(D->getDeclContext(), OS);
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
IdentifierInfo *II = D->getIdentifier();
OS << II->getName();
@@ -1088,7 +1128,9 @@ void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
OS << T->getTypeConstraintConcept()->getName();
auto Args = T->getTypeConstraintArguments();
if (!Args.empty())
- printTemplateArgumentList(OS, Args, Policy);
+ printTemplateArgumentList(
+ OS, Args, Policy,
+ T->getTypeConstraintConcept()->getTemplateParameters());
OS << ' ';
}
switch (T->getKeyword()) {
@@ -1173,32 +1215,54 @@ void TypePrinter::printDependentExtIntAfter(const DependentExtIntType *T,
raw_ostream &OS) {}
/// Appends the given scope to the end of a string.
-void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
- if (DC->isTranslationUnit()) return;
- if (DC->isFunctionOrMethod()) return;
- AppendScope(DC->getParent(), OS);
+void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS,
+ DeclarationName NameInScope) {
+ if (DC->isTranslationUnit())
+ return;
+
+ // FIXME: Consider replacing this with NamedDecl::printNestedNameSpecifier,
+ // which can also print names for function and method scopes.
+ if (DC->isFunctionOrMethod())
+ return;
+
+ if (Policy.Callbacks && Policy.Callbacks->isScopeVisible(DC))
+ return;
if (const auto *NS = dyn_cast<NamespaceDecl>(DC)) {
- if (Policy.SuppressUnwrittenScope &&
- (NS->isAnonymousNamespace() || NS->isInline()))
- return;
+ if (Policy.SuppressUnwrittenScope && NS->isAnonymousNamespace())
+ return AppendScope(DC->getParent(), OS, NameInScope);
+
+ // Only suppress an inline namespace if the name has the same lookup
+ // results in the enclosing namespace.
+ if (Policy.SuppressInlineNamespace && NS->isInline() && NameInScope &&
+ DC->getParent()->lookup(NameInScope).size() ==
+ DC->lookup(NameInScope).size())
+ return AppendScope(DC->getParent(), OS, NameInScope);
+
+ AppendScope(DC->getParent(), OS, NS->getDeclName());
if (NS->getIdentifier())
OS << NS->getName() << "::";
else
OS << "(anonymous namespace)::";
} else if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ AppendScope(DC->getParent(), OS, Spec->getDeclName());
IncludeStrongLifetimeRAII Strong(Policy);
OS << Spec->getIdentifier()->getName();
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- printTemplateArgumentList(OS, TemplateArgs.asArray(), Policy);
+ printTemplateArgumentList(
+ OS, TemplateArgs.asArray(), Policy,
+ Spec->getSpecializedTemplate()->getTemplateParameters());
OS << "::";
} else if (const auto *Tag = dyn_cast<TagDecl>(DC)) {
+ AppendScope(DC->getParent(), OS, Tag->getDeclName());
if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
OS << Typedef->getIdentifier()->getName() << "::";
else if (Tag->getIdentifier())
OS << Tag->getIdentifier()->getName() << "::";
else
return;
+ } else {
+ AppendScope(DC->getParent(), OS, NameInScope);
}
}
@@ -1225,7 +1289,7 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
// In C, this will always be empty except when the type
// being printed is anonymous within other Record.
if (!Policy.SuppressScope)
- AppendScope(D->getDeclContext(), OS);
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
if (const IdentifierInfo *II = D->getIdentifier())
OS << II->getName();
@@ -1281,13 +1345,31 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
Args = TemplateArgs.asArray();
}
IncludeStrongLifetimeRAII Strong(Policy);
- printTemplateArgumentList(OS, Args, Policy);
+ printTemplateArgumentList(
+ OS, Args, Policy,
+ Spec->getSpecializedTemplate()->getTemplateParameters());
}
spaceBeforePlaceHolder(OS);
}
void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
+ // Print the preferred name if we have one for this type.
+ for (const auto *PNA : T->getDecl()->specific_attrs<PreferredNameAttr>()) {
+ if (declaresSameEntity(PNA->getTypedefType()->getAsCXXRecordDecl(),
+ T->getDecl())) {
+ // Find the outermost typedef or alias template.
+ QualType T = PNA->getTypedefType();
+ while (true) {
+ if (auto *TT = dyn_cast<TypedefType>(T))
+ return printTypeSpec(TT->getDecl(), OS);
+ if (auto *TST = dyn_cast<TemplateSpecializationType>(T))
+ return printTemplateId(TST, OS, /*FullyQualify=*/true);
+ T = T->getLocallyUnqualifiedSingleStepDesugaredType();
+ }
+ }
+ }
+
printTag(T->getDecl(), OS);
}
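The preferred-name loop lets a record specialization print as a designated alias, walking the attribute's typedef chain out to the outermost typedef or alias-template spelling. Illustrative setup (attribute usage assumed from its libc++-style pattern):

    template <class T> struct basic_string;
    using string = basic_string<char>;
    template <class T>
    struct [[clang::preferred_name(string)]] basic_string { /* ... */ };
    // basic_string<char> now prints as "string" in diagnostics and dumps.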
@@ -1347,16 +1429,32 @@ void TypePrinter::printSubstTemplateTypeParmPackAfter(
printTemplateTypeParmAfter(T->getReplacedParameter(), OS);
}
-void TypePrinter::printTemplateSpecializationBefore(
- const TemplateSpecializationType *T,
- raw_ostream &OS) {
+void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
+ raw_ostream &OS, bool FullyQualify) {
IncludeStrongLifetimeRAII Strong(Policy);
- T->getTemplateName().print(OS, Policy);
- printTemplateArgumentList(OS, T->template_arguments(), Policy);
+ TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl();
+ if (FullyQualify && TD) {
+ if (!Policy.SuppressScope)
+ AppendScope(TD->getDeclContext(), OS, TD->getDeclName());
+
+ IdentifierInfo *II = TD->getIdentifier();
+ OS << II->getName();
+ } else {
+ T->getTemplateName().print(OS, Policy);
+ }
+
+ const TemplateParameterList *TPL = TD ? TD->getTemplateParameters() : nullptr;
+ printTemplateArgumentList(OS, T->template_arguments(), Policy, TPL);
spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printTemplateSpecializationBefore(
+ const TemplateSpecializationType *T,
+ raw_ostream &OS) {
+ printTemplateId(T, OS, false);
+}
+
void TypePrinter::printTemplateSpecializationAfter(
const TemplateSpecializationType *T,
raw_ostream &OS) {}
@@ -1506,6 +1604,8 @@ void TypePrinter::printAttributedBefore(const AttributedType *T,
OS << " _Nullable";
else if (T->getAttrKind() == attr::TypeNullUnspecified)
OS << " _Null_unspecified";
+ else if (T->getAttrKind() == attr::TypeNullableResult)
+ OS << " _Nullable_result";
else
llvm_unreachable("unhandled nullability");
spaceBeforePlaceHolder(OS);
@@ -1564,6 +1664,8 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::OpenCLPrivateAddressSpace:
case attr::OpenCLGlobalAddressSpace:
+ case attr::OpenCLGlobalDeviceAddressSpace:
+ case attr::OpenCLGlobalHostAddressSpace:
case attr::OpenCLLocalAddressSpace:
case attr::OpenCLConstantAddressSpace:
case attr::OpenCLGenericAddressSpace:
@@ -1574,6 +1676,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::LifetimeBound:
case attr::TypeNonNull:
case attr::TypeNullable:
+ case attr::TypeNullableResult:
case attr::TypeNullUnspecified:
case attr::ObjCGC:
case attr::ObjCInertUnsafeUnretained:
@@ -1751,9 +1854,159 @@ static void printArgument(const TemplateArgumentLoc &A,
return A.getArgument().print(PP, OS);
}
+static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
+ TemplateArgument Pattern,
+ ArrayRef<TemplateArgument> Args,
+ unsigned Depth);
+
+static bool isSubstitutedType(ASTContext &Ctx, QualType T, QualType Pattern,
+ ArrayRef<TemplateArgument> Args, unsigned Depth) {
+ if (Ctx.hasSameType(T, Pattern))
+ return true;
+
+ // A type parameter matches its argument.
+ if (auto *TTPT = Pattern->getAs<TemplateTypeParmType>()) {
+ if (TTPT->getDepth() == Depth && TTPT->getIndex() < Args.size() &&
+ Args[TTPT->getIndex()].getKind() == TemplateArgument::Type) {
+ QualType SubstArg = Ctx.getQualifiedType(
+ Args[TTPT->getIndex()].getAsType(), Pattern.getQualifiers());
+ return Ctx.hasSameType(SubstArg, T);
+ }
+ return false;
+ }
+
+ // FIXME: Recurse into array types.
+
+ // All other cases will need the types to be identically qualified.
+ Qualifiers TQual, PatQual;
+ T = Ctx.getUnqualifiedArrayType(T, TQual);
+ Pattern = Ctx.getUnqualifiedArrayType(Pattern, PatQual);
+ if (TQual != PatQual)
+ return false;
+
+ // Recurse into pointer-like types.
+ {
+ QualType TPointee = T->getPointeeType();
+ QualType PPointee = Pattern->getPointeeType();
+ if (!TPointee.isNull() && !PPointee.isNull())
+ return T->getTypeClass() == Pattern->getTypeClass() &&
+ isSubstitutedType(Ctx, TPointee, PPointee, Args, Depth);
+ }
+
+ // Recurse into template specialization types.
+ if (auto *PTST =
+ Pattern.getCanonicalType()->getAs<TemplateSpecializationType>()) {
+ TemplateName Template;
+ ArrayRef<TemplateArgument> TemplateArgs;
+ if (auto *TTST = T->getAs<TemplateSpecializationType>()) {
+ Template = TTST->getTemplateName();
+ TemplateArgs = TTST->template_arguments();
+ } else if (auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
+ T->getAsCXXRecordDecl())) {
+ Template = TemplateName(CTSD->getSpecializedTemplate());
+ TemplateArgs = CTSD->getTemplateArgs().asArray();
+ } else {
+ return false;
+ }
+
+ if (!isSubstitutedTemplateArgument(Ctx, Template, PTST->getTemplateName(),
+ Args, Depth))
+ return false;
+ if (TemplateArgs.size() != PTST->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ if (!isSubstitutedTemplateArgument(Ctx, TemplateArgs[I], PTST->getArg(I),
+ Args, Depth))
+ return false;
+ return true;
+ }
+
+ // FIXME: Handle more cases.
+ return false;
+}
+
+static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
+ TemplateArgument Pattern,
+ ArrayRef<TemplateArgument> Args,
+ unsigned Depth) {
+ Arg = Ctx.getCanonicalTemplateArgument(Arg);
+ Pattern = Ctx.getCanonicalTemplateArgument(Pattern);
+ if (Arg.structurallyEquals(Pattern))
+ return true;
+
+ if (Pattern.getKind() == TemplateArgument::Expression) {
+ if (auto *DRE =
+ dyn_cast<DeclRefExpr>(Pattern.getAsExpr()->IgnoreParenImpCasts())) {
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl()))
+ return NTTP->getDepth() == Depth && Args.size() > NTTP->getIndex() &&
+ Args[NTTP->getIndex()].structurallyEquals(Arg);
+ }
+ }
+
+ if (Arg.getKind() != Pattern.getKind())
+ return false;
+
+ if (Arg.getKind() == TemplateArgument::Type)
+ return isSubstitutedType(Ctx, Arg.getAsType(), Pattern.getAsType(), Args,
+ Depth);
+
+ if (Arg.getKind() == TemplateArgument::Template) {
+ TemplateDecl *PatTD = Pattern.getAsTemplate().getAsTemplateDecl();
+ if (auto *TTPD = dyn_cast_or_null<TemplateTemplateParmDecl>(PatTD))
+ return TTPD->getDepth() == Depth && Args.size() > TTPD->getIndex() &&
+ Ctx.getCanonicalTemplateArgument(Args[TTPD->getIndex()])
+ .structurallyEquals(Arg);
+ }
+
+ // FIXME: Handle more cases.
+ return false;
+}
+
+/// Make a best-effort determination of whether the type T can be produced by
+/// substituting Args into the default argument of Param.
+static bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
+ const NamedDecl *Param,
+ ArrayRef<TemplateArgument> Args,
+ unsigned Depth) {
+ // An empty pack is equivalent to not providing a pack argument.
+ if (Arg.getKind() == TemplateArgument::Pack && Arg.pack_size() == 0)
+ return true;
+
+ if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ return TTPD->hasDefaultArgument() &&
+ isSubstitutedTemplateArgument(Ctx, Arg, TTPD->getDefaultArgument(),
+ Args, Depth);
+ } else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ return TTPD->hasDefaultArgument() &&
+ isSubstitutedTemplateArgument(
+ Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth);
+ } else if (auto *NTTPD = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ return NTTPD->hasDefaultArgument() &&
+ isSubstitutedTemplateArgument(Ctx, Arg, NTTPD->getDefaultArgument(),
+ Args, Depth);
+ }
+ return false;
+}
+
template<typename TA>
static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
- const PrintingPolicy &Policy, bool SkipBrackets) {
+ const PrintingPolicy &Policy, bool SkipBrackets,
+ const TemplateParameterList *TPL) {
+ // Drop trailing template arguments that match default arguments.
+ if (TPL && Policy.SuppressDefaultTemplateArgs &&
+ !Policy.PrintCanonicalTypes && !Args.empty() &&
+ Args.size() <= TPL->size()) {
+ ASTContext &Ctx = TPL->getParam(0)->getASTContext();
+ llvm::SmallVector<TemplateArgument, 8> OrigArgs;
+ for (const TA &A : Args)
+ OrigArgs.push_back(getArgument(A));
+ while (!Args.empty() &&
+ isSubstitutedDefaultArgument(Ctx, getArgument(Args.back()),
+ TPL->getParam(Args.size() - 1),
+ OrigArgs, TPL->getDepth()))
+ Args = Args.drop_back();
+ }
+
const char *Comma = Policy.MSVCFormatting ? "," : ", ";
if (!SkipBrackets)
OS << '<';
@@ -1768,7 +2021,7 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
if (Argument.getKind() == TemplateArgument::Pack) {
if (Argument.pack_size() && !FirstArg)
OS << Comma;
- printTo(ArgOS, Argument.getPackAsArray(), Policy, true);
+ printTo(ArgOS, Argument.getPackAsArray(), Policy, true, nullptr);
} else {
if (!FirstArg)
OS << Comma;
@@ -1801,20 +2054,23 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
void clang::printTemplateArgumentList(raw_ostream &OS,
const TemplateArgumentListInfo &Args,
- const PrintingPolicy &Policy) {
- return printTo(OS, Args.arguments(), Policy, false);
+ const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL) {
+ printTemplateArgumentList(OS, Args.arguments(), Policy, TPL);
}
void clang::printTemplateArgumentList(raw_ostream &OS,
ArrayRef<TemplateArgument> Args,
- const PrintingPolicy &Policy) {
- printTo(OS, Args, Policy, false);
+ const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL) {
+ printTo(OS, Args, Policy, false, TPL);
}
void clang::printTemplateArgumentList(raw_ostream &OS,
ArrayRef<TemplateArgumentLoc> Args,
- const PrintingPolicy &Policy) {
- printTo(OS, Args, Policy, false);
+ const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL) {
+ printTo(OS, Args, Policy, false, TPL);
}
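
All three public overloads now thread an optional TemplateParameterList through to printTo, which is what lets callers opt into default-argument dropping. A hedged usage sketch (the specialization lookup is an assumption for illustration, not code from this patch):

#include "clang/AST/DeclTemplate.h"
#include "clang/AST/TemplateBase.h"

void printSpecArgs(const clang::ClassTemplateSpecializationDecl *CTSD,
                   llvm::raw_ostream &OS,
                   const clang::PrintingPolicy &Policy) {
  const clang::TemplateParameterList *TPL =
      CTSD->getSpecializedTemplate()->getTemplateParameters();
  // Passing TPL (instead of the old implicit nullptr) enables the
  // default-argument suppression added above.
  clang::printTemplateArgumentList(OS, CTSD->getTemplateArgs().asArray(),
                                   Policy, TPL);
}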
std::string Qualifiers::getAsString() const {
@@ -1863,6 +2119,10 @@ std::string Qualifiers::getAddrSpaceAsString(LangAS AS) {
return "__constant";
case LangAS::opencl_generic:
return "__generic";
+ case LangAS::opencl_global_device:
+ return "__global_device";
+ case LangAS::opencl_global_host:
+ return "__global_host";
case LangAS::cuda_device:
return "__device__";
case LangAS::cuda_constant:
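
The two new cases split the OpenCL global address space into device- and host-visible views. For orientation, a tiny consumer sketch (assumed driver code; the patched function itself is the one shown above):

#include "clang/AST/Type.h"
#include "clang/Basic/AddressSpaces.h"

std::string spellAddrSpace(clang::LangAS AS) {
  // Returns "__global_device" for LangAS::opencl_global_device and
  // "__global_host" for LangAS::opencl_global_host after this change.
  return clang::Qualifiers::getAddrSpaceAsString(AS);
}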
diff --git a/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index e88da16dd3d4..8ddd3c87e09d 100644
--- a/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -95,9 +95,11 @@ public:
// matching the descendants.
MatchChildASTVisitor(const DynTypedMatcher *Matcher, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- TraversalKind Traversal, ASTMatchFinder::BindKind Bind)
+ bool IgnoreImplicitChildren,
+ ASTMatchFinder::BindKind Bind)
: Matcher(Matcher), Finder(Finder), Builder(Builder), CurrentDepth(0),
- MaxDepth(MaxDepth), Traversal(Traversal), Bind(Bind), Matches(false) {}
+ MaxDepth(MaxDepth), IgnoreImplicitChildren(IgnoreImplicitChildren),
+ Bind(Bind), Matches(false) {}
// Returns true if a match is found in the subtree rooted at the
// given AST node. This is done via a set of mutually recursive
@@ -128,6 +130,9 @@ public:
traverse(*T);
else if (const auto *C = DynNode.get<CXXCtorInitializer>())
traverse(*C);
+ else if (const TemplateArgumentLoc *TALoc =
+ DynNode.get<TemplateArgumentLoc>())
+ traverse(*TALoc);
// FIXME: Add other base types after adding tests.
// It's OK to always overwrite the bound nodes, as if there was
@@ -142,6 +147,11 @@ public:
// They are public only to allow CRTP to work. They are *not* part
// of the public API of this class.
bool TraverseDecl(Decl *DeclNode) {
+
+ if (DeclNode && DeclNode->isImplicit() &&
+ Finder->isTraversalIgnoringImplicitNodes())
+ return baseTraverse(*DeclNode);
+
ScopedIncrement ScopedDepth(&CurrentDepth);
return (DeclNode == nullptr) || traverse(*DeclNode);
}
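
Under the ignoring traversal, an implicit Decl is no longer offered to the matcher or counted as a level of depth: the visitor falls straight through to baseTraverse, so only spelled-in-source children remain candidates. A toy illustration of the user-visible effect (matcher code assumed, not part of this patch):

#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

// For `struct S { int i; };`, S's implicit copy/move constructors are no
// longer visible to descendant matching in this mode, so the matcher below
// only fires when a constructor is actually written in the source.
DeclarationMatcher M =
    traverse(clang::TK_IgnoreUnlessSpelledInSource,
             cxxRecordDecl(hasDescendant(cxxConstructorDecl())));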
@@ -150,19 +160,13 @@ public:
Stmt *StmtToTraverse = StmtNode;
if (auto *ExprNode = dyn_cast_or_null<Expr>(StmtNode)) {
auto *LambdaNode = dyn_cast_or_null<LambdaExpr>(StmtNode);
- if (LambdaNode &&
- Finder->getASTContext().getParentMapContext().getTraversalKind() ==
- TK_IgnoreUnlessSpelledInSource)
+ if (LambdaNode && Finder->isTraversalIgnoringImplicitNodes())
StmtToTraverse = LambdaNode;
else
StmtToTraverse =
Finder->getASTContext().getParentMapContext().traverseIgnored(
ExprNode);
}
- if (Traversal == TraversalKind::TK_IgnoreImplicitCastsAndParentheses) {
- if (Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode))
- StmtToTraverse = ExprNode->IgnoreParenImpCasts();
- }
return StmtToTraverse;
}
@@ -175,6 +179,10 @@ public:
Stmt *StmtToTraverse = getStmtToTraverse(StmtNode);
if (!StmtToTraverse)
return true;
+
+ if (IgnoreImplicitChildren && isa<CXXDefaultArgExpr>(StmtNode))
+ return true;
+
if (!match(*StmtToTraverse))
return false;
return VisitorBase::TraverseStmt(StmtToTraverse, Queue);
@@ -224,9 +232,35 @@ public:
ScopedIncrement ScopedDepth(&CurrentDepth);
return traverse(*CtorInit);
}
+ bool TraverseTemplateArgumentLoc(TemplateArgumentLoc TAL) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ return traverse(TAL);
+ }
+ bool TraverseCXXForRangeStmt(CXXForRangeStmt *Node) {
+ if (!Finder->isTraversalIgnoringImplicitNodes())
+ return VisitorBase::TraverseCXXForRangeStmt(Node);
+ if (!Node)
+ return true;
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ if (auto *Init = Node->getInit())
+ if (!match(*Init))
+ return false;
+ if (!match(*Node->getLoopVariable()) || !match(*Node->getRangeInit()) ||
+ !match(*Node->getBody()))
+ return false;
+ return VisitorBase::TraverseStmt(Node->getBody());
+ }
+ bool TraverseCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *Node) {
+ if (!Finder->isTraversalIgnoringImplicitNodes())
+ return VisitorBase::TraverseCXXRewrittenBinaryOperator(Node);
+ if (!Node)
+ return true;
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+
+ return match(*Node->getLHS()) && match(*Node->getRHS());
+ }
bool TraverseLambdaExpr(LambdaExpr *Node) {
- if (Finder->getASTContext().getParentMapContext().getTraversalKind() !=
- TK_IgnoreUnlessSpelledInSource)
+ if (!Finder->isTraversalIgnoringImplicitNodes())
return VisitorBase::TraverseLambdaExpr(Node);
if (!Node)
return true;
@@ -261,7 +295,7 @@ public:
}
bool shouldVisitTemplateInstantiations() const { return true; }
- bool shouldVisitImplicitCode() const { return true; }
+ bool shouldVisitImplicitCode() const { return !IgnoreImplicitChildren; }
private:
// Used for updating the depth during traversal.
@@ -304,6 +338,9 @@ private:
return VisitorBase::TraverseConstructorInitializer(
const_cast<CXXCtorInitializer *>(&CtorInit));
}
+ bool baseTraverse(TemplateArgumentLoc TAL) {
+ return VisitorBase::TraverseTemplateArgumentLoc(TAL);
+ }
// Sets 'Matched' to true if 'Matcher' matches 'Node' and:
// 0 < CurrentDepth <= MaxDepth.
@@ -352,7 +389,7 @@ private:
BoundNodesTreeBuilder ResultBindings;
int CurrentDepth;
const int MaxDepth;
- const TraversalKind Traversal;
+ const bool IgnoreImplicitChildren;
const ASTMatchFinder::BindKind Bind;
bool Matches;
};
@@ -447,16 +484,94 @@ public:
bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit);
+ bool TraverseTemplateArgumentLoc(TemplateArgumentLoc TAL);
+
+ bool dataTraverseNode(Stmt *S, DataRecursionQueue *Queue) {
+ if (auto *RF = dyn_cast<CXXForRangeStmt>(S)) {
+ for (auto *SubStmt : RF->children()) {
+ if (SubStmt == RF->getInit() || SubStmt == RF->getLoopVarStmt() ||
+ SubStmt == RF->getRangeInit() || SubStmt == RF->getBody()) {
+ TraverseStmt(SubStmt, Queue);
+ } else {
+ ASTNodeNotSpelledInSourceScope RAII(this, true);
+ TraverseStmt(SubStmt, Queue);
+ }
+ }
+ return true;
+ } else if (auto *RBO = dyn_cast<CXXRewrittenBinaryOperator>(S)) {
+ {
+ ASTNodeNotAsIsSourceScope RAII(this, true);
+ TraverseStmt(const_cast<Expr *>(RBO->getLHS()));
+ TraverseStmt(const_cast<Expr *>(RBO->getRHS()));
+ }
+ {
+ ASTNodeNotSpelledInSourceScope RAII(this, true);
+ for (auto *SubStmt : RBO->children()) {
+ TraverseStmt(SubStmt);
+ }
+ }
+ return true;
+ } else if (auto *LE = dyn_cast<LambdaExpr>(S)) {
+ for (auto I : llvm::zip(LE->captures(), LE->capture_inits())) {
+ auto C = std::get<0>(I);
+ ASTNodeNotSpelledInSourceScope RAII(
+ this, TraversingASTNodeNotSpelledInSource || !C.isExplicit());
+ TraverseLambdaCapture(LE, &C, std::get<1>(I));
+ }
+
+ {
+ ASTNodeNotSpelledInSourceScope RAII(this, true);
+ TraverseDecl(LE->getLambdaClass());
+ }
+ {
+ ASTNodeNotAsIsSourceScope RAII(this, true);
+
+ // We need to poke around to find the bits that might be explicitly
+ // written.
+ TypeLoc TL = LE->getCallOperator()->getTypeSourceInfo()->getTypeLoc();
+ FunctionProtoTypeLoc Proto = TL.getAsAdjusted<FunctionProtoTypeLoc>();
+
+ if (auto *TPL = LE->getTemplateParameterList()) {
+ for (NamedDecl *D : *TPL) {
+ TraverseDecl(D);
+ }
+ if (Expr *RequiresClause = TPL->getRequiresClause()) {
+ TraverseStmt(RequiresClause);
+ }
+ }
+
+ if (LE->hasExplicitParameters()) {
+ // Visit parameters.
+ for (ParmVarDecl *Param : Proto.getParams())
+ TraverseDecl(Param);
+ }
+
+ const auto *T = Proto.getTypePtr();
+ for (const auto &E : T->exceptions())
+ TraverseType(E);
+
+ if (Expr *NE = T->getNoexceptExpr())
+ TraverseStmt(NE, Queue);
+
+ if (LE->hasExplicitResultType())
+ TraverseTypeLoc(Proto.getReturnLoc());
+ TraverseStmt(LE->getTrailingRequiresClause());
+
+ TraverseStmt(LE->getBody());
+ }
+ return true;
+ }
+ return RecursiveASTVisitor<MatchASTVisitor>::dataTraverseNode(S, Queue);
+ }
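
dataTraverseNode is where the not-spelled-in-source scopes are applied during the main walk: for a range-for only the written init, loop variable, range and body escape the scope; for a rewritten comparison only the original LHS/RHS do; and for lambdas the visitor hand-picks the explicitly written pieces (captures, template parameters, parameters, exception spec, return type, body) while the synthesized closure class stays hidden. A hedged example of what this buys a matcher user:

#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

// In the ignoring mode, only what the user wrote inside the lambda is
// visible; the closure type, its conversion operator and implicit captures
// are skipped.
StatementMatcher M =
    traverse(clang::TK_IgnoreUnlessSpelledInSource,
             lambdaExpr(hasDescendant(returnStmt())));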
// Matches children or descendants of 'Node' with 'BaseMatcher'.
bool memoizedMatchesRecursively(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- TraversalKind Traversal, BindKind Bind) {
+ BindKind Bind) {
// For AST-nodes that don't have an identity, we can't memoize.
if (!Node.getMemoizationData() || !Builder->isComparable())
- return matchesRecursively(Node, Matcher, Builder, MaxDepth, Traversal,
- Bind);
+ return matchesRecursively(Node, Matcher, Builder, MaxDepth, Bind);
MatchKey Key;
Key.MatcherID = Matcher.getID();
@@ -474,8 +589,8 @@ public:
MemoizedMatchResult Result;
Result.Nodes = *Builder;
- Result.ResultOfMatch = matchesRecursively(Node, Matcher, &Result.Nodes,
- MaxDepth, Traversal, Bind);
+ Result.ResultOfMatch =
+ matchesRecursively(Node, Matcher, &Result.Nodes, MaxDepth, Bind);
MemoizedMatchResult &CachedResult = ResultCache[Key];
CachedResult = std::move(Result);
@@ -488,9 +603,20 @@ public:
bool matchesRecursively(const DynTypedNode &Node,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- TraversalKind Traversal, BindKind Bind) {
- MatchChildASTVisitor Visitor(
- &Matcher, this, Builder, MaxDepth, Traversal, Bind);
+ BindKind Bind) {
+ bool ScopedTraversal = TraversingASTNodeNotSpelledInSource ||
+ TraversingASTChildrenNotSpelledInSource;
+
+ bool IgnoreImplicitChildren = false;
+
+ if (isTraversalIgnoringImplicitNodes()) {
+ IgnoreImplicitChildren = true;
+ }
+
+ ASTNodeNotSpelledInSourceScope RAII(this, ScopedTraversal);
+
+ MatchChildASTVisitor Visitor(&Matcher, this, Builder, MaxDepth,
+ IgnoreImplicitChildren, Bind);
return Visitor.findMatch(Node);
}
@@ -507,12 +633,10 @@ public:
// Implements ASTMatchFinder::matchesChildOf.
bool matchesChildOf(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
- BoundNodesTreeBuilder *Builder, TraversalKind Traversal,
- BindKind Bind) override {
+ BoundNodesTreeBuilder *Builder, BindKind Bind) override {
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
- return memoizedMatchesRecursively(Node, Ctx, Matcher, Builder, 1, Traversal,
- Bind);
+ return memoizedMatchesRecursively(Node, Ctx, Matcher, Builder, 1, Bind);
}
// Implements ASTMatchFinder::matchesDescendantOf.
bool matchesDescendantOf(const DynTypedNode &Node, ASTContext &Ctx,
@@ -522,7 +646,7 @@ public:
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
return memoizedMatchesRecursively(Node, Ctx, Matcher, Builder, INT_MAX,
- TraversalKind::TK_AsIs, Bind);
+ Bind);
}
// Implements ASTMatchFinder::matchesAncestorOf.
bool matchesAncestorOf(const DynTypedNode &Node, ASTContext &Ctx,
@@ -533,8 +657,9 @@ public:
// don't invalidate any iterators.
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
- return memoizedMatchesAncestorOfRecursively(Node, Ctx, Matcher, Builder,
- MatchMode);
+ if (MatchMode == AncestorMatchMode::AMM_ParentOnly)
+ return matchesParentOf(Node, Matcher, Builder);
+ return matchesAnyAncestorOf(Node, Ctx, Matcher, Builder);
}
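
The dispatch now mirrors the two ancestor modes directly: AMM_ParentOnly goes to the unmemoized matchesParentOf (a single ParentMap lookup), everything else to the iterative matchesAnyAncestorOf defined below. For orientation, the two stock matchers this serves (standard ASTMatchers usage, not new code in this patch):

#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

// AMM_ParentOnly: one ParentMapContext lookup per query, no cache needed.
StatementMatcher Direct = expr(hasParent(callExpr()));
// Full ancestor mode: walk the unique-parent chain with memoization, then
// fall back to BFS once the parent chain forks.
StatementMatcher Any = expr(hasAncestor(functionDecl().bind("f")));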
// Matches all registered matchers on the given node and calls the
@@ -557,6 +682,8 @@ public:
match(*N);
} else if (auto *N = Node.get<CXXCtorInitializer>()) {
match(*N);
+ } else if (auto *N = Node.get<TemplateArgumentLoc>()) {
+ match(*N);
}
}
@@ -570,7 +697,69 @@ public:
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return true; }
+ bool IsMatchingInASTNodeNotSpelledInSource() const override {
+ return TraversingASTNodeNotSpelledInSource;
+ }
+ bool isMatchingChildrenNotSpelledInSource() const override {
+ return TraversingASTChildrenNotSpelledInSource;
+ }
+ void setMatchingChildrenNotSpelledInSource(bool Set) override {
+ TraversingASTChildrenNotSpelledInSource = Set;
+ }
+
+ bool IsMatchingInASTNodeNotAsIs() const override {
+ return TraversingASTNodeNotAsIs;
+ }
+
+ bool TraverseTemplateInstantiations(ClassTemplateDecl *D) {
+ ASTNodeNotSpelledInSourceScope RAII(this, true);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTemplateInstantiations(
+ D);
+ }
+
+ bool TraverseTemplateInstantiations(VarTemplateDecl *D) {
+ ASTNodeNotSpelledInSourceScope RAII(this, true);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTemplateInstantiations(
+ D);
+ }
+
+ bool TraverseTemplateInstantiations(FunctionTemplateDecl *D) {
+ ASTNodeNotSpelledInSourceScope RAII(this, true);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTemplateInstantiations(
+ D);
+ }
+
private:
+ bool TraversingASTNodeNotSpelledInSource = false;
+ bool TraversingASTNodeNotAsIs = false;
+ bool TraversingASTChildrenNotSpelledInSource = false;
+
+ struct ASTNodeNotSpelledInSourceScope {
+ ASTNodeNotSpelledInSourceScope(MatchASTVisitor *V, bool B)
+ : MV(V), MB(V->TraversingASTNodeNotSpelledInSource) {
+ V->TraversingASTNodeNotSpelledInSource = B;
+ }
+ ~ASTNodeNotSpelledInSourceScope() {
+ MV->TraversingASTNodeNotSpelledInSource = MB;
+ }
+
+ private:
+ MatchASTVisitor *MV;
+ bool MB;
+ };
+
+ struct ASTNodeNotAsIsSourceScope {
+ ASTNodeNotAsIsSourceScope(MatchASTVisitor *V, bool B)
+ : MV(V), MB(V->TraversingASTNodeNotAsIs) {
+ V->TraversingASTNodeNotAsIs = B;
+ }
+ ~ASTNodeNotAsIsSourceScope() { MV->TraversingASTNodeNotAsIs = MB; }
+
+ private:
+ MatchASTVisitor *MV;
+ bool MB;
+ };
+
class TimeBucketRegion {
public:
TimeBucketRegion() : Bucket(nullptr) {}
@@ -680,12 +869,29 @@ private:
void matchDispatch(const CXXCtorInitializer *Node) {
matchWithoutFilter(*Node, Matchers->CtorInit);
}
+ void matchDispatch(const TemplateArgumentLoc *Node) {
+ matchWithoutFilter(*Node, Matchers->TemplateArgumentLoc);
+ }
void matchDispatch(const void *) { /* Do nothing. */ }
/// @}
+ // Returns whether a direct parent of \p Node matches \p Matcher.
+ // Unlike matchesAnyAncestorOf there's no memoization: it doesn't save much.
+ bool matchesParentOf(const DynTypedNode &Node, const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder) {
+ for (const auto &Parent : ActiveASTContext->getParents(Node)) {
+ BoundNodesTreeBuilder BuilderCopy = *Builder;
+ if (Matcher.matches(Parent, this, &BuilderCopy)) {
+ *Builder = std::move(BuilderCopy);
+ return true;
+ }
+ }
+ return false;
+ }
+
// Returns whether an ancestor of \p Node matches \p Matcher.
//
- // The order of matching ((which can lead to different nodes being bound in
+ // The order of matching (which can lead to different nodes being bound in
// case there are multiple matches) is breadth first search.
//
// To allow memoization in the very common case of having deeply nested
@@ -696,51 +902,64 @@ private:
// Once there are multiple parents, the breadth first search order does not
// allow simple memoization on the ancestors. Thus, we only memoize as long
// as there is a single parent.
- bool memoizedMatchesAncestorOfRecursively(const DynTypedNode &Node,
- ASTContext &Ctx,
- const DynTypedMatcher &Matcher,
- BoundNodesTreeBuilder *Builder,
- AncestorMatchMode MatchMode) {
- // For AST-nodes that don't have an identity, we can't memoize.
- // When doing a single-level match, we don't need to memoize because
- // ParentMap (in ASTContext) already memoizes the result.
- if (!Builder->isComparable() ||
- MatchMode == AncestorMatchMode::AMM_ParentOnly)
- return matchesAncestorOfRecursively(Node, Ctx, Matcher, Builder,
- MatchMode);
-
- MatchKey Key;
- Key.MatcherID = Matcher.getID();
- Key.Node = Node;
- Key.BoundNodes = *Builder;
- Key.Traversal = Ctx.getParentMapContext().getTraversalKind();
- Key.Type = MatchType::Ancestors;
+ //
+ // We avoid a recursive implementation to prevent excessive stack use on
+ // very deep ASTs (similarly to RecursiveASTVisitor's data recursion).
+ bool matchesAnyAncestorOf(DynTypedNode Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder) {
- // Note that we cannot use insert and reuse the iterator, as recursive
- // calls to match might invalidate the result cache iterators.
- MemoizationMap::iterator I = ResultCache.find(Key);
- if (I != ResultCache.end()) {
- *Builder = I->second.Nodes;
- return I->second.ResultOfMatch;
- }
+ // Memoization keys that can be updated with the result.
+ // These are the memoizable nodes in the chain of unique parents, which
+ // terminates when a node has multiple parents, or matches, or is the root.
+ std::vector<MatchKey> Keys;
+ // When returning, update the memoization cache.
+ auto Finish = [&](bool Matched) {
+ for (const auto &Key : Keys) {
+ MemoizedMatchResult &CachedResult = ResultCache[Key];
+ CachedResult.ResultOfMatch = Matched;
+ CachedResult.Nodes = *Builder;
+ }
+ return Matched;
+ };
+
+ // Loop while there's a single parent and we want to attempt memoization.
+ DynTypedNodeList Parents{ArrayRef<DynTypedNode>()}; // after loop: size != 1
+ for (;;) {
+ // A cache key only makes sense if memoization is possible.
+ if (Builder->isComparable()) {
+ Keys.emplace_back();
+ Keys.back().MatcherID = Matcher.getID();
+ Keys.back().Node = Node;
+ Keys.back().BoundNodes = *Builder;
+ Keys.back().Traversal = Ctx.getParentMapContext().getTraversalKind();
+ Keys.back().Type = MatchType::Ancestors;
+
+ // Check the cache.
+ MemoizationMap::iterator I = ResultCache.find(Keys.back());
+ if (I != ResultCache.end()) {
+ Keys.pop_back(); // Don't populate the cache for the matching node!
+ *Builder = I->second.Nodes;
+ return Finish(I->second.ResultOfMatch);
+ }
+ }
- MemoizedMatchResult Result;
- Result.Nodes = *Builder;
- Result.ResultOfMatch = matchesAncestorOfRecursively(
- Node, Ctx, Matcher, &Result.Nodes, MatchMode);
+ Parents = ActiveASTContext->getParents(Node);
+ // Either no parents or multiple parents: leave chain+memoize mode and
+ // enter bfs+forgetful mode.
+ if (Parents.size() != 1)
+ break;
- MemoizedMatchResult &CachedResult = ResultCache[Key];
- CachedResult = std::move(Result);
-
- *Builder = CachedResult.Nodes;
- return CachedResult.ResultOfMatch;
- }
+ // Check the next parent.
+ Node = *Parents.begin();
+ BoundNodesTreeBuilder BuilderCopy = *Builder;
+ if (Matcher.matches(Node, this, &BuilderCopy)) {
+ *Builder = std::move(BuilderCopy);
+ return Finish(true);
+ }
+ }
+ // We reached the end of the chain.
- bool matchesAncestorOfRecursively(const DynTypedNode &Node, ASTContext &Ctx,
- const DynTypedMatcher &Matcher,
- BoundNodesTreeBuilder *Builder,
- AncestorMatchMode MatchMode) {
- const auto &Parents = ActiveASTContext->getParents(Node);
if (Parents.empty()) {
// Nodes may have no parents if:
// a) the node is the TranslationUnitDecl
@@ -759,46 +978,30 @@ private:
llvm_unreachable("Parent map should be complete!");
}
#endif
- return false;
- }
- if (Parents.size() == 1) {
- // Only one parent - do recursive memoization.
- const DynTypedNode Parent = Parents[0];
- BoundNodesTreeBuilder BuilderCopy = *Builder;
- if (Matcher.matches(Parent, this, &BuilderCopy)) {
- *Builder = std::move(BuilderCopy);
- return true;
- }
- if (MatchMode != ASTMatchFinder::AMM_ParentOnly) {
- return memoizedMatchesAncestorOfRecursively(Parent, Ctx, Matcher,
- Builder, MatchMode);
- // Once we get back from the recursive call, the result will be the
- // same as the parent's result.
- }
} else {
- // Multiple parents - BFS over the rest of the nodes.
- llvm::DenseSet<const void *> Visited;
+ assert(Parents.size() > 1);
+ // BFS starting from the parents not yet considered.
+ // Memoization of newly visited nodes is not possible (but we still update
+ // results for the elements in the chain we found above).
std::deque<DynTypedNode> Queue(Parents.begin(), Parents.end());
+ llvm::DenseSet<const void *> Visited;
while (!Queue.empty()) {
BoundNodesTreeBuilder BuilderCopy = *Builder;
if (Matcher.matches(Queue.front(), this, &BuilderCopy)) {
*Builder = std::move(BuilderCopy);
- return true;
+ return Finish(true);
}
- if (MatchMode != ASTMatchFinder::AMM_ParentOnly) {
- for (const auto &Parent :
- ActiveASTContext->getParents(Queue.front())) {
- // Make sure we do not visit the same node twice.
- // Otherwise, we'll visit the common ancestors as often as there
- // are splits on the way down.
- if (Visited.insert(Parent.getMemoizationData()).second)
- Queue.push_back(Parent);
- }
+ for (const auto &Parent : ActiveASTContext->getParents(Queue.front())) {
+ // Make sure we do not visit the same node twice.
+ // Otherwise, we'll visit the common ancestors as often as there
+ // are splits on the way down.
+ if (Visited.insert(Parent.getMemoizationData()).second)
+ Queue.push_back(Parent);
}
Queue.pop_front();
}
}
- return false;
+ return Finish(false);
}
// Implements a BoundNodesTree::Visitor that calls a MatchCallback with
@@ -976,6 +1179,26 @@ bool MatchASTVisitor::TraverseDecl(Decl *DeclNode) {
if (!DeclNode) {
return true;
}
+
+ bool ScopedTraversal =
+ TraversingASTNodeNotSpelledInSource || DeclNode->isImplicit();
+ bool ScopedChildren = TraversingASTChildrenNotSpelledInSource;
+
+ if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(DeclNode)) {
+ auto SK = CTSD->getSpecializationKind();
+ if (SK == TSK_ExplicitInstantiationDeclaration ||
+ SK == TSK_ExplicitInstantiationDefinition)
+ ScopedChildren = true;
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(DeclNode)) {
+ if (FD->isDefaulted())
+ ScopedChildren = true;
+ if (FD->isTemplateInstantiation())
+ ScopedTraversal = true;
+ }
+
+ ASTNodeNotSpelledInSourceScope RAII1(this, ScopedTraversal);
+ ASTChildrenNotSpelledInSourceScope RAII2(this, ScopedChildren);
+
match(*DeclNode);
return RecursiveASTVisitor<MatchASTVisitor>::TraverseDecl(DeclNode);
}
@@ -984,6 +1207,10 @@ bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue) {
if (!StmtNode) {
return true;
}
+ bool ScopedTraversal = TraversingASTNodeNotSpelledInSource ||
+ TraversingASTChildrenNotSpelledInSource;
+
+ ASTNodeNotSpelledInSourceScope RAII(this, ScopedTraversal);
match(*StmtNode);
return RecursiveASTVisitor<MatchASTVisitor>::TraverseStmt(StmtNode, Queue);
}
@@ -1029,12 +1256,25 @@ bool MatchASTVisitor::TraverseConstructorInitializer(
if (!CtorInit)
return true;
+ bool ScopedTraversal = TraversingASTNodeNotSpelledInSource ||
+ TraversingASTChildrenNotSpelledInSource;
+
+ if (!CtorInit->isWritten())
+ ScopedTraversal = true;
+
+ ASTNodeNotSpelledInSourceScope RAII1(this, ScopedTraversal);
+
match(*CtorInit);
return RecursiveASTVisitor<MatchASTVisitor>::TraverseConstructorInitializer(
CtorInit);
}
+bool MatchASTVisitor::TraverseTemplateArgumentLoc(TemplateArgumentLoc Loc) {
+ match(Loc);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTemplateArgumentLoc(Loc);
+}
+
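With TemplateArgumentLoc now a first-class match target, a callback can bind written template arguments directly. A small usage sketch (the callback class is assumed for illustration):

#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

class TALCallback : public MatchFinder::MatchCallback {
public:
  void run(const MatchFinder::MatchResult &Result) override {
    if (const auto *TAL =
            Result.Nodes.getNodeAs<clang::TemplateArgumentLoc>("arg"))
      TAL->getLocation().print(llvm::errs(), *Result.SourceManager);
  }
};

void registerIt(MatchFinder &Finder, TALCallback &CB) {
  // Uses the templateArgumentLoc() matcher and the addMatcher overload
  // introduced in this patch.
  Finder.addMatcher(templateArgumentLoc().bind("arg"), &CB);
}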
class MatchASTConsumer : public ASTConsumer {
public:
MatchASTConsumer(MatchFinder *Finder,
@@ -1111,6 +1351,12 @@ void MatchFinder::addMatcher(const CXXCtorInitializerMatcher &NodeMatch,
Matchers.AllCallbacks.insert(Action);
}
+void MatchFinder::addMatcher(const TemplateArgumentLocMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.TemplateArgumentLoc.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
MatchCallback *Action) {
if (NodeMatch.canConvertTo<Decl>()) {
@@ -1134,6 +1380,9 @@ bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
} else if (NodeMatch.canConvertTo<CXXCtorInitializer>()) {
addMatcher(NodeMatch.convertTo<CXXCtorInitializer>(), Action);
return true;
+ } else if (NodeMatch.canConvertTo<TemplateArgumentLoc>()) {
+ addMatcher(NodeMatch.convertTo<TemplateArgumentLoc>(), Action);
+ return true;
}
return false;
}
diff --git a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 4b9baf7a0e75..6c7e14e3499a 100644
--- a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -49,26 +49,30 @@ AST_MATCHER_P(ObjCMessageExpr, hasAnySelectorMatcher, std::vector<std::string>,
namespace internal {
-bool NotUnaryOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers);
-
-bool AllOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers);
-
-bool EachOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers);
-
-bool AnyOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers);
-
-bool OptionallyVariadicOperator(const DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers);
+static bool notUnaryOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+static bool allOfVariadicOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+static bool eachOfVariadicOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+static bool anyOfVariadicOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+static bool optionallyVariadicOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
bool matchesAnyBase(const CXXRecordDecl &Node,
const Matcher<CXXBaseSpecifier> &BaseSpecMatcher,
@@ -146,15 +150,9 @@ private:
};
/// A matcher that always returns true.
-///
-/// We only ever need one instance of this matcher, so we create a global one
-/// and reuse it to reduce the overhead of the matcher and increase the chance
-/// of cache hits.
class TrueMatcherImpl : public DynMatcherInterface {
public:
- TrueMatcherImpl() {
- Retain(); // Reference count will never become zero.
- }
+ TrueMatcherImpl() = default;
bool dynMatches(const DynTypedNode &, ASTMatchFinder *,
BoundNodesTreeBuilder *) const override {
@@ -189,7 +187,10 @@ private:
} // namespace
-static llvm::ManagedStatic<TrueMatcherImpl> TrueMatcherInstance;
+bool ASTMatchFinder::isTraversalIgnoringImplicitNodes() const {
+ return getASTContext().getParentMapContext().getTraversalKind() ==
+ TK_IgnoreUnlessSpelledInSource;
+}
DynTypedMatcher
DynTypedMatcher::constructVariadic(DynTypedMatcher::VariadicOperator Op,
@@ -220,21 +221,21 @@ DynTypedMatcher::constructVariadic(DynTypedMatcher::VariadicOperator Op,
}
return DynTypedMatcher(
SupportedKind, RestrictKind,
- new VariadicMatcher<AllOfVariadicOperator>(std::move(InnerMatchers)));
+ new VariadicMatcher<allOfVariadicOperator>(std::move(InnerMatchers)));
case VO_AnyOf:
return DynTypedMatcher(
SupportedKind, RestrictKind,
- new VariadicMatcher<AnyOfVariadicOperator>(std::move(InnerMatchers)));
+ new VariadicMatcher<anyOfVariadicOperator>(std::move(InnerMatchers)));
case VO_EachOf:
return DynTypedMatcher(
SupportedKind, RestrictKind,
- new VariadicMatcher<EachOfVariadicOperator>(std::move(InnerMatchers)));
+ new VariadicMatcher<eachOfVariadicOperator>(std::move(InnerMatchers)));
case VO_Optionally:
return DynTypedMatcher(SupportedKind, RestrictKind,
- new VariadicMatcher<OptionallyVariadicOperator>(
+ new VariadicMatcher<optionallyVariadicOperator>(
std::move(InnerMatchers)));
case VO_UnaryNot:
@@ -242,7 +243,7 @@ DynTypedMatcher::constructVariadic(DynTypedMatcher::VariadicOperator Op,
// vector.
return DynTypedMatcher(
SupportedKind, RestrictKind,
- new VariadicMatcher<NotUnaryOperator>(std::move(InnerMatchers)));
+ new VariadicMatcher<notUnaryOperator>(std::move(InnerMatchers)));
}
llvm_unreachable("Invalid Op value.");
}
@@ -255,8 +256,7 @@ DynTypedMatcher::constructRestrictedWrapper(const DynTypedMatcher &InnerMatcher,
return Copy;
}
-DynTypedMatcher
-DynTypedMatcher::withTraversalKind(ast_type_traits::TraversalKind TK) {
+DynTypedMatcher DynTypedMatcher::withTraversalKind(TraversalKind TK) {
auto Copy = *this;
Copy.Implementation =
new DynTraversalMatcherImpl(TK, std::move(Copy.Implementation));
@@ -264,7 +264,12 @@ DynTypedMatcher::withTraversalKind(ast_type_traits::TraversalKind TK) {
}
DynTypedMatcher DynTypedMatcher::trueMatcher(ASTNodeKind NodeKind) {
- return DynTypedMatcher(NodeKind, NodeKind, &*TrueMatcherInstance);
+ // We only ever need one instance of TrueMatcherImpl, so we create a static
+ // instance and reuse it to reduce the overhead of the matcher and increase
+ // the chance of cache hits.
+ static const llvm::IntrusiveRefCntPtr<TrueMatcherImpl> Instance =
+ new TrueMatcherImpl();
+ return DynTypedMatcher(NodeKind, NodeKind, Instance);
}
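
The function-local static keeps a single shared instance (preserving the cache-hit rationale the old comment described) without the Retain()-forever hack, since the IntrusiveRefCntPtr now holds a real reference for the program's lifetime. The pattern in isolation (generic sketch; SharedThing is a stand-in type):

#include "llvm/ADT/IntrusiveRefCntPtr.h"

struct SharedThing : llvm::RefCountedBase<SharedThing> {};

// Constructed on first use, thread-safe since C++11, and never torn down
// behind the backs of matchers that still hold a reference.
const llvm::IntrusiveRefCntPtr<SharedThing> &sharedThing() {
  static const llvm::IntrusiveRefCntPtr<SharedThing> Instance =
      new SharedThing();
  return Instance;
}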
bool DynTypedMatcher::canMatchNodesOfKind(ASTNodeKind Kind) const {
@@ -284,6 +289,14 @@ bool DynTypedMatcher::matches(const DynTypedNode &DynNode,
TraversalKindScope RAII(Finder->getASTContext(),
Implementation->TraversalKind());
+ if (Finder->isTraversalIgnoringImplicitNodes() &&
+ Finder->IsMatchingInASTNodeNotSpelledInSource())
+ return false;
+
+ if (!Finder->isTraversalIgnoringImplicitNodes() &&
+ Finder->IsMatchingInASTNodeNotAsIs())
+ return false;
+
auto N =
Finder->getASTContext().getParentMapContext().traverseIgnored(DynNode);
@@ -304,6 +317,14 @@ bool DynTypedMatcher::matchesNoKindCheck(const DynTypedNode &DynNode,
TraversalKindScope raii(Finder->getASTContext(),
Implementation->TraversalKind());
+ if (Finder->isTraversalIgnoringImplicitNodes() &&
+ Finder->IsMatchingInASTNodeNotSpelledInSource())
+ return false;
+
+ if (!Finder->isTraversalIgnoringImplicitNodes() &&
+ Finder->IsMatchingInASTNodeNotAsIs())
+ return false;
+
auto N =
Finder->getASTContext().getParentMapContext().traverseIgnored(DynNode);
@@ -341,9 +362,10 @@ void BoundNodesTreeBuilder::addMatch(const BoundNodesTreeBuilder &Other) {
Bindings.append(Other.Bindings.begin(), Other.Bindings.end());
}
-bool NotUnaryOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers) {
+static bool notUnaryOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
if (InnerMatchers.size() != 1)
return false;
@@ -361,9 +383,10 @@ bool NotUnaryOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
return !InnerMatchers[0].matches(DynNode, Finder, &Discard);
}
-bool AllOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers) {
+static bool allOfVariadicOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
// allOf leads to one matcher for each alternative in the first
// matcher combined with each alternative in the second matcher.
// Thus, we can reuse the same Builder.
@@ -372,9 +395,10 @@ bool AllOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
});
}
-bool EachOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers) {
+static bool eachOfVariadicOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
@@ -388,9 +412,10 @@ bool EachOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
return Matched;
}
-bool AnyOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers) {
+static bool anyOfVariadicOperator(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
BoundNodesTreeBuilder Result = *Builder;
if (InnerMatcher.matches(DynNode, Finder, &Result)) {
@@ -401,10 +426,10 @@ bool AnyOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
return false;
}
-bool OptionallyVariadicOperator(const DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- ArrayRef<DynTypedMatcher> InnerMatchers) {
+static bool
+optionallyVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
if (InnerMatchers.size() != 1)
return false;
@@ -618,13 +643,10 @@ bool HasNameMatcher::matchesNodeFullSlow(const NamedDecl &Node) const {
llvm::SmallString<128> NodeName = StringRef("::");
llvm::raw_svector_ostream OS(NodeName);
- if (SkipUnwritten) {
- PrintingPolicy Policy = Node.getASTContext().getPrintingPolicy();
- Policy.SuppressUnwrittenScope = true;
- Node.printQualifiedName(OS, Policy);
- } else {
- Node.printQualifiedName(OS);
- }
+ PrintingPolicy Policy = Node.getASTContext().getPrintingPolicy();
+ Policy.SuppressUnwrittenScope = SkipUnwritten;
+ Policy.SuppressInlineNamespace = SkipUnwritten;
+ Node.printQualifiedName(OS, Policy);
const StringRef FullName = OS.str();
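
The slow path now also suppresses inline namespaces, so hasName sees through them the same way it already skipped unwritten scopes. Illustration (libc++-style layout assumed):

#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

// A declaration living in std::__1 (an inline versioning namespace) now
// prints as ::std::string for comparison purposes, so this matches
// regardless of the standard library's internal namespace.
DeclarationMatcher M = namedDecl(hasName("::std::string"));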
@@ -710,6 +732,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
const internal::VariadicAllOfMatcher<Decl> decl;
+const internal::VariadicAllOfMatcher<DecompositionDecl> decompositionDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
@@ -734,11 +757,15 @@ const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer;
const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
+const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc;
const internal::VariadicAllOfMatcher<TemplateName> templateName;
const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTemplateParmDecl>
+ templateTemplateParmDecl;
+
const internal::VariadicAllOfMatcher<QualType> qualType;
const internal::VariadicAllOfMatcher<Type> type;
const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
@@ -841,6 +868,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator>
+ cxxRewrittenBinaryOperator;
const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr;
@@ -886,10 +915,15 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr>
+ genericSelectionExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
+const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
+ CXXRewrittenBinaryOperator>
+ binaryOperation;
const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator;
const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
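
binaryOperation is the headline addition in this block: a single matcher that maps onto BinaryOperator, CXXOperatorCallExpr and CXXRewrittenBinaryOperator, so operator-based checks no longer need three spellings. Sketch:

#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

// Matches `a != b` whether it is a builtin comparison, an overloaded
// operator!= call, or a C++20 rewrite in terms of operator==.
StatementMatcher M =
    binaryOperation(hasOperatorName("!="),
                    hasEitherOperand(ignoringImplicit(declRefExpr())));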
diff --git a/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp b/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
index 989ee0fa75cd..f6fdbe868e2d 100644
--- a/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
@@ -89,8 +89,9 @@ llvm::Optional<std::string>
clang::ast_matchers::dynamic::internal::ArgTypeTraits<
clang::OpenMPClauseKind>::getBestGuess(const VariantValue &Value) {
static constexpr llvm::StringRef Allowed[] = {
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) #Enum,
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) #Enum,
+#include "llvm/Frontend/OpenMP/OMP.inc"
};
if (Value.isString())
return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
@@ -120,7 +121,8 @@ static constexpr std::pair<llvm::StringRef, llvm::Regex::RegexFlags>
{"BasicRegex", llvm::Regex::RegexFlags::BasicRegex},
};
-llvm::Optional<llvm::Regex::RegexFlags> getRegexFlag(llvm::StringRef Flag) {
+static llvm::Optional<llvm::Regex::RegexFlags>
+getRegexFlag(llvm::StringRef Flag) {
for (const auto &StringFlag : RegexMap) {
if (Flag == StringFlag.first)
return StringFlag.second;
@@ -128,7 +130,8 @@ llvm::Optional<llvm::Regex::RegexFlags> getRegexFlag(llvm::StringRef Flag) {
return llvm::None;
}
-llvm::Optional<llvm::StringRef> getCloseRegexMatch(llvm::StringRef Flag) {
+static llvm::Optional<llvm::StringRef>
+getCloseRegexMatch(llvm::StringRef Flag) {
for (const auto &StringFlag : RegexMap) {
if (Flag.edit_distance(StringFlag.first) < 3)
return StringFlag.first;
diff --git a/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 33f6d1e4155c..690b52162e2b 100644
--- a/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -58,7 +58,10 @@ template <class T> struct ArgTypeTraits<const T &> : public ArgTypeTraits<T> {
};
template <> struct ArgTypeTraits<std::string> {
- static bool is(const VariantValue &Value) { return Value.isString(); }
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isString();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) { return true; }
static const std::string &get(const VariantValue &Value) {
return Value.getString();
@@ -78,8 +81,11 @@ struct ArgTypeTraits<StringRef> : public ArgTypeTraits<std::string> {
};
template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T>> {
- static bool is(const VariantValue &Value) {
- return Value.isMatcher() && Value.getMatcher().hasTypedMatcher<T>();
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isMatcher();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) {
+ return Value.getMatcher().hasTypedMatcher<T>();
}
static ast_matchers::internal::Matcher<T> get(const VariantValue &Value) {
@@ -96,7 +102,10 @@ template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T>> {
};
template <> struct ArgTypeTraits<bool> {
- static bool is(const VariantValue &Value) { return Value.isBoolean(); }
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isBoolean();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) { return true; }
static bool get(const VariantValue &Value) {
return Value.getBoolean();
@@ -112,7 +121,10 @@ template <> struct ArgTypeTraits<bool> {
};
template <> struct ArgTypeTraits<double> {
- static bool is(const VariantValue &Value) { return Value.isDouble(); }
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isDouble();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) { return true; }
static double get(const VariantValue &Value) {
return Value.getDouble();
@@ -128,7 +140,10 @@ template <> struct ArgTypeTraits<double> {
};
template <> struct ArgTypeTraits<unsigned> {
- static bool is(const VariantValue &Value) { return Value.isUnsigned(); }
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isUnsigned();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) { return true; }
static unsigned get(const VariantValue &Value) {
return Value.getUnsigned();
@@ -146,15 +161,20 @@ template <> struct ArgTypeTraits<unsigned> {
template <> struct ArgTypeTraits<attr::Kind> {
private:
static Optional<attr::Kind> getAttrKind(llvm::StringRef AttrKind) {
+ if (!AttrKind.consume_front("attr::"))
+ return llvm::None;
return llvm::StringSwitch<Optional<attr::Kind>>(AttrKind)
-#define ATTR(X) .Case("attr::" #X, attr:: X)
+#define ATTR(X) .Case(#X, attr::X)
#include "clang/Basic/AttrList.inc"
.Default(llvm::None);
}
public:
- static bool is(const VariantValue &Value) {
- return Value.isString() && getAttrKind(Value.getString());
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isString();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) {
+ return getAttrKind(Value.getString()).hasValue();
}
static attr::Kind get(const VariantValue &Value) {
@@ -171,15 +191,20 @@ public:
template <> struct ArgTypeTraits<CastKind> {
private:
static Optional<CastKind> getCastKind(llvm::StringRef AttrKind) {
+ if (!AttrKind.consume_front("CK_"))
+ return llvm::None;
return llvm::StringSwitch<Optional<CastKind>>(AttrKind)
-#define CAST_OPERATION(Name) .Case("CK_" #Name, CK_##Name)
+#define CAST_OPERATION(Name) .Case(#Name, CK_##Name)
#include "clang/AST/OperationKinds.def"
.Default(llvm::None);
}
public:
- static bool is(const VariantValue &Value) {
- return Value.isString() && getCastKind(Value.getString());
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isString();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) {
+ return getCastKind(Value.getString()).hasValue();
}
static CastKind get(const VariantValue &Value) {
@@ -198,8 +223,11 @@ private:
static Optional<llvm::Regex::RegexFlags> getFlags(llvm::StringRef Flags);
public:
- static bool is(const VariantValue &Value) {
- return Value.isString() && getFlags(Value.getString());
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isString();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) {
+ return getFlags(Value.getString()).hasValue();
}
static llvm::Regex::RegexFlags get(const VariantValue &Value) {
@@ -215,14 +243,18 @@ template <> struct ArgTypeTraits<OpenMPClauseKind> {
private:
static Optional<OpenMPClauseKind> getClauseKind(llvm::StringRef ClauseKind) {
return llvm::StringSwitch<Optional<OpenMPClauseKind>>(ClauseKind)
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) .Case(#Enum, llvm::omp::Clause::Enum)
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) .Case(#Enum, llvm::omp::Clause::Enum)
+#include "llvm/Frontend/OpenMP/OMP.inc"
.Default(llvm::None);
}
public:
- static bool is(const VariantValue &Value) {
- return Value.isString() && getClauseKind(Value.getString());
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isString();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) {
+ return getClauseKind(Value.getString()).hasValue();
}
static OpenMPClauseKind get(const VariantValue &Value) {
@@ -238,18 +270,22 @@ template <> struct ArgTypeTraits<UnaryExprOrTypeTrait> {
private:
static Optional<UnaryExprOrTypeTrait>
getUnaryOrTypeTraitKind(llvm::StringRef ClauseKind) {
+ if (!ClauseKind.consume_front("UETT_"))
+ return llvm::None;
return llvm::StringSwitch<Optional<UnaryExprOrTypeTrait>>(ClauseKind)
-#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) \
- .Case("UETT_" #Name, UETT_##Name)
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) .Case(#Name, UETT_##Name)
#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) \
- .Case("UETT_" #Name, UETT_##Name)
+ .Case(#Name, UETT_##Name)
#include "clang/Basic/TokenKinds.def"
.Default(llvm::None);
}
public:
- static bool is(const VariantValue &Value) {
- return Value.isString() && getUnaryOrTypeTraitKind(Value.getString());
+ static bool hasCorrectType(const VariantValue &Value) {
+ return Value.isString();
+ }
+ static bool hasCorrectValue(const VariantValue &Value) {
+ return getUnaryOrTypeTraitKind(Value.getString()).hasValue();
}
static UnaryExprOrTypeTrait get(const VariantValue &Value) {
@@ -453,12 +489,31 @@ variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
const ParserValue &Arg = Args[i];
const VariantValue &Value = Arg.Value;
- if (!ArgTraits::is(Value)) {
+ if (!ArgTraits::hasCorrectType(Value)) {
Error->addError(Arg.Range, Error->ET_RegistryWrongArgType)
<< (i + 1) << ArgTraits::getKind().asString() << Value.getTypeAsString();
HasError = true;
break;
}
+ if (!ArgTraits::hasCorrectValue(Value)) {
+ if (llvm::Optional<std::string> BestGuess =
+ ArgTraits::getBestGuess(Value)) {
+ Error->addError(Arg.Range, Error->ET_RegistryUnknownEnumWithReplace)
+ << i + 1 << Value.getString() << *BestGuess;
+ } else if (Value.isString()) {
+ Error->addError(Arg.Range, Error->ET_RegistryValueNotFound)
+ << Value.getString();
+ } else {
+ // This isn't ideal, but it's better than reporting an empty string as
+ // the error in this case.
+ Error->addError(Arg.Range, Error->ET_RegistryWrongArgType)
+ << (i + 1) << ArgTraits::getKind().asString()
+ << Value.getTypeAsString();
+ }
+ HasError = true;
+ break;
+ }
+
InnerArgs[i] = new ArgT(ArgTraits::get(Value));
}
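
Splitting is() into hasCorrectType/hasCorrectValue lets the parser distinguish "wrong kind of argument" from "right kind, unknown value", which is what makes the best-guess suggestions possible. The shape of the check in isolation (a paraphrase of the code above, diagnostics elided):

#include "clang/ASTMatchers/Dynamic/VariantValue.h"

// Traits is any ArgTypeTraits specialization from this header.
template <typename Traits>
bool checkArg(const clang::ast_matchers::dynamic::VariantValue &V) {
  if (!Traits::hasCorrectType(V))
    return false; // wrong type entirely -> ET_RegistryWrongArgType
  if (!Traits::hasCorrectValue(V))
    return false; // right type, unknown value -> ET_RegistryValueNotFound,
                  // with a best-guess replacement when one exists
  return true;    // safe to call Traits::get(V)
}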
@@ -568,16 +623,21 @@ private:
}
#define CHECK_ARG_TYPE(index, type) \
- if (!ArgTypeTraits<type>::is(Args[index].Value)) { \
+ if (!ArgTypeTraits<type>::hasCorrectType(Args[index].Value)) { \
+ Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType) \
+ << (index + 1) << ArgTypeTraits<type>::getKind().asString() \
+ << Args[index].Value.getTypeAsString(); \
+ return VariantMatcher(); \
+ } \
+ if (!ArgTypeTraits<type>::hasCorrectValue(Args[index].Value)) { \
if (llvm::Optional<std::string> BestGuess = \
ArgTypeTraits<type>::getBestGuess(Args[index].Value)) { \
Error->addError(Args[index].Range, \
Error->ET_RegistryUnknownEnumWithReplace) \
<< index + 1 << Args[index].Value.getString() << *BestGuess; \
- } else { \
- Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType) \
- << (index + 1) << ArgTypeTraits<type>::getKind().asString() \
- << Args[index].Value.getTypeAsString(); \
+ } else if (Args[index].Value.isString()) { \
+ Error->addError(Args[index].Range, Error->ET_RegistryValueNotFound) \
+ << Args[index].Value.getString(); \
} \
return VariantMatcher(); \
}
@@ -761,7 +821,7 @@ public:
<< "1 or 2" << Args.size();
return VariantMatcher();
}
- if (!ArgTypeTraits<StringRef>::is(Args[0].Value)) {
+ if (!ArgTypeTraits<StringRef>::hasCorrectType(Args[0].Value)) {
Error->addError(Args[0].Range, Error->ET_RegistryWrongArgType)
<< 1 << ArgTypeTraits<StringRef>::getKind().asString()
<< Args[0].Value.getTypeAsString();
@@ -771,16 +831,23 @@ public:
return outvalueToVariantMatcher(
NoFlags(ArgTypeTraits<StringRef>::get(Args[0].Value)));
}
- if (!ArgTypeTraits<llvm::Regex::RegexFlags>::is(Args[1].Value)) {
+ if (!ArgTypeTraits<llvm::Regex::RegexFlags>::hasCorrectType(
+ Args[1].Value)) {
+ Error->addError(Args[1].Range, Error->ET_RegistryWrongArgType)
+ << 2 << ArgTypeTraits<llvm::Regex::RegexFlags>::getKind().asString()
+ << Args[1].Value.getTypeAsString();
+ return VariantMatcher();
+ }
+ if (!ArgTypeTraits<llvm::Regex::RegexFlags>::hasCorrectValue(
+ Args[1].Value)) {
if (llvm::Optional<std::string> BestGuess =
ArgTypeTraits<llvm::Regex::RegexFlags>::getBestGuess(
Args[1].Value)) {
Error->addError(Args[1].Range, Error->ET_RegistryUnknownEnumWithReplace)
<< 2 << Args[1].Value.getString() << *BestGuess;
} else {
- Error->addError(Args[1].Range, Error->ET_RegistryWrongArgType)
- << 2 << ArgTypeTraits<llvm::Regex::RegexFlags>::getKind().asString()
- << Args[1].Value.getTypeAsString();
+ Error->addError(Args[1].Range, Error->ET_RegistryValueNotFound)
+ << Args[1].Value.getString();
}
return VariantMatcher();
}
@@ -858,6 +925,70 @@ private:
const StringRef MatcherName;
};
+class MapAnyOfMatcherDescriptor : public MatcherDescriptor {
+ ASTNodeKind CladeNodeKind;
+ std::vector<ASTNodeKind> NodeKinds;
+
+public:
+ MapAnyOfMatcherDescriptor(ASTNodeKind CladeNodeKind,
+ std::vector<ASTNodeKind> NodeKinds)
+ : CladeNodeKind(CladeNodeKind), NodeKinds(NodeKinds) {}
+
+ VariantMatcher create(SourceRange NameRange, ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override {
+
+ std::vector<DynTypedMatcher> NodeArgs;
+
+ for (auto NK : NodeKinds) {
+ std::vector<DynTypedMatcher> InnerArgs;
+
+ for (const auto &Arg : Args) {
+ if (!Arg.Value.isMatcher())
+ return {};
+ const VariantMatcher &VM = Arg.Value.getMatcher();
+ if (VM.hasTypedMatcher(NK)) {
+ auto DM = VM.getTypedMatcher(NK);
+ InnerArgs.push_back(DM);
+ }
+ }
+
+ if (InnerArgs.empty()) {
+ NodeArgs.push_back(
+ DynTypedMatcher::trueMatcher(NK).dynCastTo(CladeNodeKind));
+ } else {
+ NodeArgs.push_back(
+ DynTypedMatcher::constructVariadic(
+ ast_matchers::internal::DynTypedMatcher::VO_AllOf, NK,
+ InnerArgs)
+ .dynCastTo(CladeNodeKind));
+ }
+ }
+
+ auto Result = DynTypedMatcher::constructVariadic(
+ ast_matchers::internal::DynTypedMatcher::VO_AnyOf, CladeNodeKind,
+ NodeArgs);
+ Result.setAllowBind(true);
+ return VariantMatcher::SingleMatcher(Result);
+ }
+
+ bool isVariadic() const override { return true; }
+ unsigned getNumArgs() const override { return 0; }
+
+ void getArgKinds(ASTNodeKind ThisKind, unsigned,
+ std::vector<ArgKind> &Kinds) const override {
+ Kinds.push_back(ThisKind);
+ }
+
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
+ if (Specificity)
+ *Specificity = 1;
+ if (LeastDerivedKind)
+ *LeastDerivedKind = CladeNodeKind;
+ return true;
+ }
+};
+
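MapAnyOfMatcherDescriptor is what exposes mapAnyOf-based matchers such as binaryOperation through the dynamic registry: each argument is handed to every node kind that accepts it, the per-kind matchers are allOf-combined, and the kinds are anyOf-combined into the clade. Roughly equivalent hand-written construction for an argument-free two-kind clade (a sketch using the public DynTypedMatcher API):

#include "clang/ASTMatchers/ASTMatchersInternal.h"

using clang::ASTNodeKind;
using clang::ast_matchers::internal::DynTypedMatcher;

DynTypedMatcher buildAnyOfKinds(ASTNodeKind Clade, ASTNodeKind K1,
                                ASTNodeKind K2) {
  std::vector<DynTypedMatcher> NodeArgs = {
      DynTypedMatcher::trueMatcher(K1).dynCastTo(Clade),
      DynTypedMatcher::trueMatcher(K2).dynCastTo(Clade)};
  return DynTypedMatcher::constructVariadic(DynTypedMatcher::VO_AnyOf, Clade,
                                            NodeArgs);
}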
/// Helper functions to select the appropriate marshaller functions.
/// They detect the number of arguments, arguments types and return type.
@@ -962,6 +1093,15 @@ std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
MinCount, MaxCount, Func.Op, MatcherName);
}
+template <typename CladeType, typename... MatcherT>
+std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
+ ast_matchers::internal::MapAnyOfMatcherImpl<CladeType, MatcherT...>,
+ StringRef MatcherName) {
+ return std::make_unique<MapAnyOfMatcherDescriptor>(
+ ASTNodeKind::getFromNodeKind<CladeType>(),
+ std::vector<ASTNodeKind>{ASTNodeKind::getFromNodeKind<MatcherT>()...});
+}
+
} // namespace internal
} // namespace dynamic
} // namespace ast_matchers
diff --git a/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index ec2215804c09..00a7c74a0b90 100644
--- a/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -31,8 +31,6 @@
#include <utility>
#include <vector>
-using namespace clang::ast_type_traits;
-
namespace clang {
namespace ast_matchers {
namespace dynamic {
@@ -145,6 +143,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(autoreleasePoolStmt)
REGISTER_MATCHER(binaryConditionalOperator);
REGISTER_MATCHER(binaryOperator);
+ REGISTER_MATCHER(binaryOperation);
REGISTER_MATCHER(blockDecl);
REGISTER_MATCHER(blockExpr);
REGISTER_MATCHER(blockPointerType);
@@ -193,6 +192,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxOperatorCallExpr);
REGISTER_MATCHER(cxxRecordDecl);
REGISTER_MATCHER(cxxReinterpretCastExpr);
+ REGISTER_MATCHER(cxxRewrittenBinaryOperator);
REGISTER_MATCHER(cxxStaticCastExpr);
REGISTER_MATCHER(cxxStdInitializerListExpr);
REGISTER_MATCHER(cxxTemporaryObjectExpr);
@@ -202,6 +202,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxUnresolvedConstructExpr);
REGISTER_MATCHER(decayedType);
REGISTER_MATCHER(decl);
+ REGISTER_MATCHER(decompositionDecl);
REGISTER_MATCHER(declCountIs);
REGISTER_MATCHER(declRefExpr);
REGISTER_MATCHER(declStmt);
@@ -227,6 +228,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(floatLiteral);
REGISTER_MATCHER(forEach);
REGISTER_MATCHER(forEachArgumentWithParam);
+ REGISTER_MATCHER(forEachArgumentWithParamType);
REGISTER_MATCHER(forEachConstructorInitializer);
REGISTER_MATCHER(forEachDescendant);
REGISTER_MATCHER(forEachOverridden);
@@ -239,6 +241,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(functionProtoType);
REGISTER_MATCHER(functionTemplateDecl);
REGISTER_MATCHER(functionType);
+ REGISTER_MATCHER(genericSelectionExpr);
REGISTER_MATCHER(gnuNullExpr);
REGISTER_MATCHER(gotoStmt);
REGISTER_MATCHER(has);
@@ -300,6 +303,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasLocalStorage);
REGISTER_MATCHER(hasLoopInit);
REGISTER_MATCHER(hasLoopVariable);
+ REGISTER_MATCHER(hasMemberName);
REGISTER_MATCHER(hasMethod);
REGISTER_MATCHER(hasName);
REGISTER_MATCHER(hasNullSelector);
@@ -431,6 +435,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isVirtual);
REGISTER_MATCHER(isVirtualAsWritten);
REGISTER_MATCHER(isVolatileQualified);
+ REGISTER_MATCHER(isWeak);
REGISTER_MATCHER(isWritten);
REGISTER_MATCHER(lValueReferenceType);
REGISTER_MATCHER(labelDecl);
@@ -440,6 +445,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(materializeTemporaryExpr);
REGISTER_MATCHER(member);
REGISTER_MATCHER(memberExpr);
+ REGISTER_MATCHER(memberHasSameNameAsBoundNode);
REGISTER_MATCHER(memberPointerType);
REGISTER_MATCHER(namedDecl);
REGISTER_MATCHER(namesType);
@@ -514,6 +520,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(templateArgumentCountIs);
REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateSpecializationType);
+ REGISTER_MATCHER(templateTemplateParmDecl);
REGISTER_MATCHER(templateTypeParmDecl);
REGISTER_MATCHER(templateTypeParmType);
REGISTER_MATCHER(throughUsingDecl);
diff --git a/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp b/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 866e2d0e3491..d1ecb1e00b91 100644
--- a/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -59,6 +59,11 @@ VariantMatcher::MatcherOps::canConstructFrom(const DynTypedMatcher &Matcher,
return Matcher.canConvertTo(NodeKind);
}
+DynTypedMatcher VariantMatcher::MatcherOps::convertMatcher(
+ const DynTypedMatcher &Matcher) const {
+ return Matcher.dynCastTo(NodeKind);
+}
+
llvm::Optional<DynTypedMatcher>
VariantMatcher::MatcherOps::constructVariadicOperator(
DynTypedMatcher::VariadicOperator Op,
@@ -263,6 +268,10 @@ VariantValue::VariantValue(StringRef String) : Type(VT_Nothing) {
setString(String);
}
+VariantValue::VariantValue(ASTNodeKind NodeKind) : Type(VT_Nothing) {
+ setNodeKind(NodeKind);
+}
+
VariantValue::VariantValue(const VariantMatcher &Matcher) : Type(VT_Nothing) {
setMatcher(Matcher);
}
@@ -285,6 +294,9 @@ VariantValue &VariantValue::operator=(const VariantValue &Other) {
case VT_String:
setString(Other.getString());
break;
+ case VT_NodeKind:
+ setNodeKind(Other.getNodeKind());
+ break;
case VT_Matcher:
setMatcher(Other.getMatcher());
break;
@@ -303,6 +315,9 @@ void VariantValue::reset() {
case VT_Matcher:
delete Value.Matcher;
break;
+ case VT_NodeKind:
+ delete Value.NodeKind;
+ break;
// Cases that do nothing.
case VT_Boolean:
case VT_Double:
@@ -373,6 +388,19 @@ void VariantValue::setString(StringRef NewValue) {
Value.String = new std::string(NewValue);
}
+bool VariantValue::isNodeKind() const { return Type == VT_NodeKind; }
+
+const ASTNodeKind &VariantValue::getNodeKind() const {
+ assert(isNodeKind());
+ return *Value.NodeKind;
+}
+
+void VariantValue::setNodeKind(ASTNodeKind NewValue) {
+ reset();
+ Type = VT_NodeKind;
+ Value.NodeKind = new ASTNodeKind(NewValue);
+}
+
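VariantValue thus gains a fourth owned payload alongside strings and matchers. A tiny usage sketch (assumed driver code, not from this patch):

#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Decl.h"
#include "clang/ASTMatchers/Dynamic/VariantValue.h"
#include <cassert>

void demo() {
  clang::ast_matchers::dynamic::VariantValue V(
      clang::ASTNodeKind::getFromNodeKind<clang::FunctionDecl>());
  assert(V.isNodeKind());
  // getTypeAsString() now reports the kind's own name, e.g. "FunctionDecl".
}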
bool VariantValue::isMatcher() const {
return Type == VT_Matcher;
}
@@ -444,6 +472,8 @@ std::string VariantValue::getTypeAsString() const {
case VT_Boolean: return "Boolean";
case VT_Double: return "Double";
case VT_Unsigned: return "Unsigned";
+ case VT_NodeKind:
+ return getNodeKind().asStringRef().str();
case VT_Nothing: return "Nothing";
}
llvm_unreachable("Invalid Type");
diff --git a/clang/lib/ASTMatchers/GtestMatchers.cpp b/clang/lib/ASTMatchers/GtestMatchers.cpp
index c99fdf6c0fcd..0e587c0c3b9f 100644
--- a/clang/lib/ASTMatchers/GtestMatchers.cpp
+++ b/clang/lib/ASTMatchers/GtestMatchers.cpp
@@ -89,14 +89,14 @@ static llvm::StringRef getExpectMacro(GtestCmp Cmp) {
internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left,
StatementMatcher Right) {
return callExpr(callee(getComparisonDecl(Cmp)),
- isExpandedFromMacro(getAssertMacro(Cmp)),
+ isExpandedFromMacro(getAssertMacro(Cmp).str()),
hasArgument(2, Left), hasArgument(3, Right));
}
internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left,
StatementMatcher Right) {
return callExpr(callee(getComparisonDecl(Cmp)),
- isExpandedFromMacro(getExpectMacro(Cmp)),
+ isExpandedFromMacro(getExpectMacro(Cmp).str()),
hasArgument(2, Left), hasArgument(3, Right));
}
diff --git a/clang/lib/Analysis/BodyFarm.cpp b/clang/lib/Analysis/BodyFarm.cpp
index f9f0553d28f0..603da6715625 100644
--- a/clang/lib/Analysis/BodyFarm.cpp
+++ b/clang/lib/Analysis/BodyFarm.cpp
@@ -166,23 +166,21 @@ ASTMaker::makeLvalueToRvalue(const VarDecl *Arg,
ImplicitCastExpr *ASTMaker::makeImplicitCast(const Expr *Arg, QualType Ty,
CastKind CK) {
return ImplicitCastExpr::Create(C, Ty,
- /* CastKind=*/ CK,
- /* Expr=*/ const_cast<Expr *>(Arg),
- /* CXXCastPath=*/ nullptr,
- /* ExprValueKind=*/ VK_RValue);
+ /* CastKind=*/CK,
+ /* Expr=*/const_cast<Expr *>(Arg),
+ /* CXXCastPath=*/nullptr,
+ /* ExprValueKind=*/VK_RValue,
+ /* FPFeatures */ FPOptionsOverride());
}
Expr *ASTMaker::makeIntegralCast(const Expr *Arg, QualType Ty) {
if (Arg->getType() == Ty)
return const_cast<Expr*>(Arg);
-
- return ImplicitCastExpr::Create(C, Ty, CK_IntegralCast,
- const_cast<Expr*>(Arg), nullptr, VK_RValue);
+ return makeImplicitCast(Arg, Ty, CK_IntegralCast);
}
ImplicitCastExpr *ASTMaker::makeIntegralCastToBoolean(const Expr *Arg) {
- return ImplicitCastExpr::Create(C, C.BoolTy, CK_IntegralToBoolean,
- const_cast<Expr*>(Arg), nullptr, VK_RValue);
+ return makeImplicitCast(Arg, C.BoolTy, CK_IntegralToBoolean);
}
ObjCBoolLiteralExpr *ASTMaker::makeObjCBool(bool Val) {
@@ -267,7 +265,7 @@ static CallExpr *create_call_once_funcptr_call(ASTContext &C, ASTMaker M,
}
return CallExpr::Create(C, SubExpr, CallArgs, C.VoidTy, VK_RValue,
- SourceLocation());
+ SourceLocation(), FPOptionsOverride());
}
static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
@@ -468,6 +466,8 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
/* Init=*/nullptr,
/* Var=*/nullptr,
/* Cond=*/FlagCheck,
+ /* LPL=*/SourceLocation(),
+ /* RPL=*/SourceLocation(),
/* Then=*/M.makeCompound({CallbackCall, FlagAssignment}));
return Out;
@@ -514,7 +514,7 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
/*Args=*/None,
/*QualType=*/C.VoidTy,
/*ExprValueType=*/VK_RValue,
- /*SourceLocation=*/SourceLocation());
+ /*SourceLocation=*/SourceLocation(), FPOptionsOverride());
// (2) Create the assignment to the predicate.
Expr *DoneValue =
@@ -552,6 +552,8 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
/* Init=*/nullptr,
/* Var=*/nullptr,
/* Cond=*/GuardCondition,
+ /* LPL=*/SourceLocation(),
+ /* RPL=*/SourceLocation(),
/* Then=*/CS);
return If;
}
@@ -578,8 +580,8 @@ static Stmt *create_dispatch_sync(ASTContext &C, const FunctionDecl *D) {
ASTMaker M(C);
DeclRefExpr *DR = M.makeDeclRefExpr(PV);
ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
- CallExpr *CE =
- CallExpr::Create(C, ICE, None, C.VoidTy, VK_RValue, SourceLocation());
+ CallExpr *CE = CallExpr::Create(C, ICE, None, C.VoidTy, VK_RValue,
+ SourceLocation(), FPOptionsOverride());
return CE;
}
@@ -655,11 +657,13 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
Stmt *Else = M.makeReturn(RetVal);
/// Construct the If.
- auto *If = IfStmt::Create(C, SourceLocation(),
- /* IsConstexpr=*/false,
- /* Init=*/nullptr,
- /* Var=*/nullptr, Comparison, Body,
- SourceLocation(), Else);
+ auto *If =
+ IfStmt::Create(C, SourceLocation(),
+ /* IsConstexpr=*/false,
+ /* Init=*/nullptr,
+ /* Var=*/nullptr, Comparison,
+ /* LPL=*/SourceLocation(),
+ /* RPL=*/SourceLocation(), Body, SourceLocation(), Else);
return If;
}
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index fc74226951a4..edc86c41c3b9 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -4773,11 +4773,11 @@ CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E,
CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(
BinaryOperator *E, bool ExternallyDestructed, TempDtorContext &Context) {
if (E->isCommaOp()) {
- // For comma operator LHS expression is visited
- // before RHS expression. For destructors visit them in reverse order.
- CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), ExternallyDestructed, Context);
+ // For the comma operator, the LHS expression is evaluated before the RHS
+ // expression, so prepend temporary destructors for the LHS first.
CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
- return LHSBlock ? LHSBlock : RHSBlock;
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), ExternallyDestructed, Context);
+ return RHSBlock ? RHSBlock : LHSBlock;
}
if (E->isLogicalOp()) {
@@ -4798,19 +4798,15 @@ CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(
}
if (E->isAssignmentOp()) {
- // For assignment operator (=) LHS expression is visited
- // before RHS expression. For destructors visit them in reverse order.
+ // For assignment operators, the RHS expression is evaluated before the LHS
+ // expression, so prepend temporary destructors for the RHS first.
CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
return LHSBlock ? LHSBlock : RHSBlock;
}
- // For any other binary operator RHS expression is visited before
- // LHS expression (order of children). For destructors visit them in reverse
- // order.
- CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
- CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
- return RHSBlock ? RHSBlock : LHSBlock;
+ // Any other operator is visited normally.
+ return VisitChildrenForTemporaryDtors(E, ExternallyDestructed, Context);
}
CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors(
@@ -4917,14 +4913,13 @@ CFGBlock *CFGBuilder::VisitOMPExecutableDirective(OMPExecutableDirective *D,
B = R;
}
// Visit associated structured block if any.
- if (!D->isStandaloneDirective())
- if (CapturedStmt *CS = D->getInnermostCapturedStmt()) {
- Stmt *S = CS->getCapturedStmt();
- if (!isa<CompoundStmt>(S))
- addLocalScopeAndDtors(S);
- if (CFGBlock *R = addStmt(S))
- B = R;
- }
+ if (!D->isStandaloneDirective()) {
+ Stmt *S = D->getRawStmt();
+ if (!isa<CompoundStmt>(S))
+ addLocalScopeAndDtors(S);
+ if (CFGBlock *R = addStmt(S))
+ B = R;
+ }
return B;
}
diff --git a/clang/lib/Analysis/CalledOnceCheck.cpp b/clang/lib/Analysis/CalledOnceCheck.cpp
new file mode 100644
index 000000000000..883629a300dc
--- /dev/null
+++ b/clang/lib/Analysis/CalledOnceCheck.cpp
@@ -0,0 +1,1525 @@
+//===- CalledOnceCheck.cpp - Check 'called once' parameters ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/CalledOnceCheck.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <memory>
+
+using namespace clang;
+
+namespace {
+static constexpr unsigned EXPECTED_MAX_NUMBER_OF_PARAMS = 2;
+template <class T>
+using ParamSizedVector = llvm::SmallVector<T, EXPECTED_MAX_NUMBER_OF_PARAMS>;
+static constexpr unsigned EXPECTED_NUMBER_OF_BASIC_BLOCKS = 8;
+template <class T>
+using CFGSizedVector = llvm::SmallVector<T, EXPECTED_NUMBER_OF_BASIC_BLOCKS>;
+constexpr llvm::StringLiteral CONVENTIONAL_NAMES[] = {
+ "completionHandler", "completion", "withCompletionHandler"};
+constexpr llvm::StringLiteral CONVENTIONAL_SUFFIXES[] = {
+ "WithCompletionHandler", "WithCompletion"};
+constexpr llvm::StringLiteral CONVENTIONAL_CONDITIONS[] = {
+ "error", "cancel", "shouldCall", "done", "OK", "success"};
+
+class ParameterStatus {
+public:
+  // Status kind is basically the main part of a parameter's status.
+ // The kind represents our knowledge (so far) about a tracked parameter
+ // in the context of this analysis.
+ //
+ // Since we want to report on missing and extraneous calls, we need to
+  // Since we want to report on missing and extraneous calls, we need to
+  // track whether the parameter was called or not.  This automatically
+  // gives us two kinds: `NotCalled` and `Called`.
+  // One of the erroneous situations is when the parameter is called only
+ // on some of the paths. We could've considered it `NotCalled`, but we want
+ // to report double call warnings even if these two calls are not guaranteed
+ // to happen in every execution. We also don't want to have it as `Called`
+  // because not calling a tracked parameter on all of the paths is an error
+ // on its own. For these reasons, we need to have a separate kind,
+ // `MaybeCalled`, and change `Called` to `DefinitelyCalled` to avoid
+ // confusion.
+ //
+  // The two violations, calling the parameter more than once and not
+  // calling it on every path, are not, however, mutually exclusive.  In
+  // situations where both violations take place, we prefer to report ONLY
+  // the double call.  It's always
+ // harder to pinpoint a bug that has arisen when a user neglects to take the
+ // right action (and therefore, no action is taken), than when a user takes
+ // the wrong action. And, in order to remember that we already reported
+ // a double call, we need another kind: `Reported`.
+ //
+  // Our analysis is intra-procedural and, while in a perfect world
+  // developers would use tracked parameters only to call them, in the real
+  // world the picture might be different.  Parameters can be stored in
+  // global variables or leaked into other functions that we know nothing
+  // about.  We try to be lenient and trust users.  Another kind, `Escaped`,
+  // reflects such situations.  We don't know if it gets called there or
+  // not, but we should always think of `Escaped` as the best possible
+  // option.
+ //
+ // Some of the paths in the analyzed functions might end with a call
+ // to noreturn functions. Such paths are not required to have parameter
+ // calls and we want to track that. For the purposes of better diagnostics,
+ // we don't want to reuse `Escaped` and, thus, have another kind `NoReturn`.
+ //
+  // Additionally, we have the `NotVisited` kind that tells us nothing about
+  // a tracked parameter but is used for tracking analyzed (aka visited)
+ // basic blocks.
+ //
+ // If we consider `|` to be a JOIN operation of two kinds coming from
+ // two different paths, the following properties must hold:
+ //
+ // 1. for any Kind K: K | K == K
+ // Joining two identical kinds should result in the same kind.
+ //
+ // 2. for any Kind K: Reported | K == Reported
+ // Doesn't matter on which path it was reported, it still is.
+ //
+ // 3. for any Kind K: NoReturn | K == K
+ // We can totally ignore noreturn paths during merges.
+ //
+ // 4. DefinitelyCalled | NotCalled == MaybeCalled
+ // Called on one path, not called on another - that's simply
+ // a definition for MaybeCalled.
+ //
+ // 5. for any Kind K in [DefinitelyCalled, NotCalled, MaybeCalled]:
+ // Escaped | K == K
+  //       Escaped mirrors the other statuses after joins.
+  //       Every situation when we join any of the listed kinds K
+  //       is a violation.  For this reason, in order to assume the
+  //       best outcome for this escape, we consider it to be the
+  //       same as the other path.
+ //
+ // 6. for any Kind K in [DefinitelyCalled, NotCalled]:
+ // MaybeCalled | K == MaybeCalled
+ // MaybeCalled should basically stay after almost every join.
+ enum Kind {
+ // No-return paths should be absolutely transparent for the analysis.
+    // 0x0 is the identity element for the selected join operation (binary or).
+ NoReturn = 0x0, /* 0000 */
+ // Escaped marks situations when marked parameter escaped into
+    // Escaped marks situations when a marked parameter escaped into
+ Escaped = 0x1, /* 0001 */
+ // Parameter was definitely called once at this point.
+ DefinitelyCalled = 0x3, /* 0011 */
+    // Kinds less than or equal to NON_ERROR_STATUS are not considered errors.
+ NON_ERROR_STATUS = DefinitelyCalled,
+ // Parameter was not yet called.
+ NotCalled = 0x5, /* 0101 */
+ // Parameter was not called at least on one path leading to this point,
+    // while there is also at least one path on which it gets called.
+ MaybeCalled = 0x7, /* 0111 */
+ // Parameter was not yet analyzed.
+ NotVisited = 0x8, /* 1000 */
+ // We already reported a violation and stopped tracking calls for this
+ // parameter.
+    Reported = 0xF, /* 1111 */
+ LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Reported)
+ };
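+
+  // For illustration, here is how a few of the bit patterns above combine
+  // under the `|` join used below (a worked example, not extra semantics):
+  //
+  //   DefinitelyCalled(0011) | NotCalled(0101)   == 0111 == MaybeCalled
+  //   Escaped(0001)          | NotCalled(0101)   == 0101 == NotCalled
+  //   NoReturn(0000)         | MaybeCalled(0111) == 0111 == MaybeCalled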
+
+ constexpr ParameterStatus() = default;
+ /* implicit */ ParameterStatus(Kind K) : StatusKind(K) {
+ assert(!seenAnyCalls(K) && "Can't initialize status without a call");
+ }
+ ParameterStatus(Kind K, const Expr *Call) : StatusKind(K), Call(Call) {
+ assert(seenAnyCalls(K) && "This kind is not supposed to have a call");
+ }
+
+ const Expr &getCall() const {
+ assert(seenAnyCalls(getKind()) && "ParameterStatus doesn't have a call");
+ return *Call;
+ }
+ static bool seenAnyCalls(Kind K) {
+ return (K & DefinitelyCalled) == DefinitelyCalled && K != Reported;
+ }
+ bool seenAnyCalls() const { return seenAnyCalls(getKind()); }
+
+ static bool isErrorStatus(Kind K) { return K > NON_ERROR_STATUS; }
+ bool isErrorStatus() const { return isErrorStatus(getKind()); }
+
+ Kind getKind() const { return StatusKind; }
+
+ void join(const ParameterStatus &Other) {
+ // If we have a pointer already, let's keep it.
+ // For the purposes of the analysis, it doesn't really matter
+ // which call we report.
+ //
+ // If we don't have a pointer, let's take whatever gets joined.
+ if (!Call) {
+ Call = Other.Call;
+ }
+ // Join kinds.
+ StatusKind |= Other.getKind();
+ }
+
+ bool operator==(const ParameterStatus &Other) const {
+    // We compare only kinds; the pointers on their own are only additional
+    // information.
+ return getKind() == Other.getKind();
+ }
+
+private:
+ // It would've been a perfect place to use llvm::PointerIntPair, but
+ // unfortunately NumLowBitsAvailable for clang::Expr had been reduced to 2.
+ Kind StatusKind = NotVisited;
+ const Expr *Call = nullptr;
+};
+
+/// State aggregates statuses of all tracked parameters.
+class State {
+public:
+ State(unsigned Size, ParameterStatus::Kind K = ParameterStatus::NotVisited)
+ : ParamData(Size, K) {}
+
+  /// Return the status of the parameter with the given index.
+ /// \{
+ ParameterStatus &getStatusFor(unsigned Index) { return ParamData[Index]; }
+ const ParameterStatus &getStatusFor(unsigned Index) const {
+ return ParamData[Index];
+ }
+ /// \}
+
+  /// Return true if the parameter with the given index has seen any calls.
+ bool seenAnyCalls(unsigned Index) const {
+ return getStatusFor(Index).seenAnyCalls();
+ }
+ /// Return a reference that we consider a call.
+ ///
+  /// Should only be used for parameters that have seen any calls.
+ const Expr &getCallFor(unsigned Index) const {
+ return getStatusFor(Index).getCall();
+ }
+  /// Return the status kind of the parameter with the given index.
+ ParameterStatus::Kind getKindFor(unsigned Index) const {
+ return getStatusFor(Index).getKind();
+ }
+
+ bool isVisited() const {
+ return llvm::all_of(ParamData, [](const ParameterStatus &S) {
+ return S.getKind() != ParameterStatus::NotVisited;
+ });
+ }
+
+ // Join other state into the current state.
+ void join(const State &Other) {
+ assert(ParamData.size() == Other.ParamData.size() &&
+ "Couldn't join statuses with different sizes");
+ for (auto Pair : llvm::zip(ParamData, Other.ParamData)) {
+ std::get<0>(Pair).join(std::get<1>(Pair));
+ }
+ }
+
+ using iterator = ParamSizedVector<ParameterStatus>::iterator;
+ using const_iterator = ParamSizedVector<ParameterStatus>::const_iterator;
+
+ iterator begin() { return ParamData.begin(); }
+ iterator end() { return ParamData.end(); }
+
+ const_iterator begin() const { return ParamData.begin(); }
+ const_iterator end() const { return ParamData.end(); }
+
+ bool operator==(const State &Other) const {
+ return ParamData == Other.ParamData;
+ }
+
+private:
+ ParamSizedVector<ParameterStatus> ParamData;
+};
+
+/// A simple class that finds a DeclRefExpr in the given expression.
+///
+/// However, we don't want to find ANY nested DeclRefExpr, skipping whatever
+/// expressions we meet on our way.  Only certain expressions that are
+/// considered "no-op" for our task are indeed skipped.
+class DeclRefFinder
+ : public ConstStmtVisitor<DeclRefFinder, const DeclRefExpr *> {
+public:
+ /// Find a DeclRefExpr in the given expression.
+ ///
+ /// In its most basic form (ShouldRetrieveFromComparisons == false),
+ /// this function can be simply reduced to the following question:
+ ///
+ /// - If expression E is used as a function argument, could we say
+ /// that DeclRefExpr nested in E is used as an argument?
+ ///
+ /// According to this rule, we can say that parens, casts and dereferencing
+ /// (dereferencing only applied to function pointers, but this is our case)
+ /// can be skipped.
+ ///
+ /// When we should look into comparisons the question changes to:
+ ///
+ /// - If expression E is used as a condition, could we say that
+ /// DeclRefExpr is being checked?
+ ///
+  /// And even though these are two different questions, they have quite a lot
+  /// in common.  Actually, we can say that whatever expression answers the
+  /// first question positively fits the second question as well.
+ ///
+  /// In addition, we skip binary operators == and !=, and unary operator !.
+ static const DeclRefExpr *find(const Expr *E,
+ bool ShouldRetrieveFromComparisons = false) {
+ return DeclRefFinder(ShouldRetrieveFromComparisons).Visit(E);
+ }
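+  // For example, given a hypothetical condition `!(*handler == nullptr)`,
+  // find(E, /*ShouldRetrieveFromComparisons=*/true) would look through the
+  // logical not, the comparison, parens, and the dereference, and return
+  // the DeclRefExpr for 'handler'.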
+
+ const DeclRefExpr *VisitDeclRefExpr(const DeclRefExpr *DR) { return DR; }
+
+ const DeclRefExpr *VisitUnaryOperator(const UnaryOperator *UO) {
+ switch (UO->getOpcode()) {
+ case UO_LNot:
+ // We care about logical not only if we care about comparisons.
+ if (!ShouldRetrieveFromComparisons)
+ return nullptr;
+ LLVM_FALLTHROUGH;
+    // Function pointers/references can be dereferenced before a call.
+    // That, however, doesn't make it any different from a regular call.
+    // For this reason, the dereference operation is a "no-op".
+ case UO_Deref:
+ return Visit(UO->getSubExpr());
+ default:
+ return nullptr;
+ }
+ }
+
+ const DeclRefExpr *VisitBinaryOperator(const BinaryOperator *BO) {
+ if (!ShouldRetrieveFromComparisons)
+ return nullptr;
+
+ switch (BO->getOpcode()) {
+ case BO_EQ:
+ case BO_NE: {
+ const DeclRefExpr *LHS = Visit(BO->getLHS());
+ return LHS ? LHS : Visit(BO->getRHS());
+ }
+ default:
+ return nullptr;
+ }
+ }
+
+ const DeclRefExpr *VisitOpaqueValueExpr(const OpaqueValueExpr *OVE) {
+ return Visit(OVE->getSourceExpr());
+ }
+
+ const DeclRefExpr *VisitExpr(const Expr *E) {
+ // It is a fallback method that gets called whenever the actual type
+ // of the given expression is not covered.
+ //
+    // We first check whether we have anything to skip, and then repeat the
+    // whole procedure for the nested expression instead.
+ const Expr *DeclutteredExpr = E->IgnoreParenCasts();
+ return E != DeclutteredExpr ? Visit(DeclutteredExpr) : nullptr;
+ }
+
+private:
+ DeclRefFinder(bool ShouldRetrieveFromComparisons)
+ : ShouldRetrieveFromComparisons(ShouldRetrieveFromComparisons) {}
+
+ bool ShouldRetrieveFromComparisons;
+};
+
+const DeclRefExpr *findDeclRefExpr(const Expr *In,
+ bool ShouldRetrieveFromComparisons = false) {
+ return DeclRefFinder::find(In, ShouldRetrieveFromComparisons);
+}
+
+const ParmVarDecl *
+findReferencedParmVarDecl(const Expr *In,
+ bool ShouldRetrieveFromComparisons = false) {
+ if (const DeclRefExpr *DR =
+ findDeclRefExpr(In, ShouldRetrieveFromComparisons)) {
+ return dyn_cast<ParmVarDecl>(DR->getDecl());
+ }
+
+ return nullptr;
+}
+
+/// Return the condition expression of a statement if it has one.
+const Expr *getCondition(const Stmt *S) {
+ if (!S) {
+ return nullptr;
+ }
+
+ if (const auto *If = dyn_cast<IfStmt>(S)) {
+ return If->getCond();
+ }
+ if (const auto *Ternary = dyn_cast<AbstractConditionalOperator>(S)) {
+ return Ternary->getCond();
+ }
+
+ return nullptr;
+}
+
+/// A small helper class that collects all named identifiers in the given
+/// expression. It traverses it recursively, so names from deeper levels
+/// of the AST will end up in the results.
+/// Results might have duplicate names; if this is a problem, convert to
+/// string sets afterwards.
+class NamesCollector : public RecursiveASTVisitor<NamesCollector> {
+public:
+ static constexpr unsigned EXPECTED_NUMBER_OF_NAMES = 5;
+ using NameCollection =
+ llvm::SmallVector<llvm::StringRef, EXPECTED_NUMBER_OF_NAMES>;
+
+ static NameCollection collect(const Expr *From) {
+ NamesCollector Impl;
+ Impl.TraverseStmt(const_cast<Expr *>(From));
+ return Impl.Result;
+ }
+
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ Result.push_back(E->getDecl()->getName());
+ return true;
+ }
+
+ bool VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *E) {
+ llvm::StringRef Name;
+
+ if (E->isImplicitProperty()) {
+ ObjCMethodDecl *PropertyMethodDecl = nullptr;
+ if (E->isMessagingGetter()) {
+ PropertyMethodDecl = E->getImplicitPropertyGetter();
+ } else {
+ PropertyMethodDecl = E->getImplicitPropertySetter();
+ }
+ assert(PropertyMethodDecl &&
+ "Implicit property must have associated declaration");
+ Name = PropertyMethodDecl->getSelector().getNameForSlot(0);
+ } else {
+ assert(E->isExplicitProperty());
+ Name = E->getExplicitProperty()->getName();
+ }
+
+ Result.push_back(Name);
+ return true;
+ }
+
+private:
+ NamesCollector() = default;
+ NameCollection Result;
+};
+
+/// Check whether the given expression mentions any of the conventional names.
+bool mentionsAnyOfConventionalNames(const Expr *E) {
+ NamesCollector::NameCollection MentionedNames = NamesCollector::collect(E);
+
+ return llvm::any_of(MentionedNames, [](llvm::StringRef ConditionName) {
+ return llvm::any_of(
+ CONVENTIONAL_CONDITIONS,
+ [ConditionName](const llvm::StringLiteral &Conventional) {
+ return ConditionName.contains_lower(Conventional);
+ });
+ });
+}
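+
+// E.g. a hypothetical condition like `didCancel || self.errorCode` mentions
+// conventional names: "didCancel" contains "cancel" and "errorCode" contains
+// "error" (the match is a case-insensitive substring check).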
+
+/// Clarification is a simple pair of a reason why the parameter is not called
+/// on every path and a statement to blame.
+struct Clarification {
+ NeverCalledReason Reason;
+ const Stmt *Location;
+};
+
+/// A helper class that can produce a clarification based on the given pair
+/// of basic blocks.
+class NotCalledClarifier
+ : public ConstStmtVisitor<NotCalledClarifier,
+ llvm::Optional<Clarification>> {
+public:
+  /// The main entrypoint for the class, the function that tries to find a
+  /// clarification explaining the sub-path that starts with the CFG edge
+  /// from Conditional to SuccWithoutCall.
+ ///
+ /// This means that this function has one precondition:
+ /// SuccWithoutCall should be a successor block for Conditional.
+ ///
+  /// Because a clarification is only needed for non-trivial pairs of blocks
+  /// (i.e. when SuccWithoutCall is not the only successor), it returns
+  /// meaningful results only for such cases.  For this very reason, the
+  /// parent basic block, Conditional, is named that way, so it is clear
+  /// what kind of block is expected.
+ static llvm::Optional<Clarification>
+ clarify(const CFGBlock *Conditional, const CFGBlock *SuccWithoutCall) {
+ if (const Stmt *Terminator = Conditional->getTerminatorStmt()) {
+ return NotCalledClarifier{Conditional, SuccWithoutCall}.Visit(Terminator);
+ }
+ return llvm::None;
+ }
+
+ llvm::Optional<Clarification> VisitIfStmt(const IfStmt *If) {
+ return VisitBranchingBlock(If, NeverCalledReason::IfThen);
+ }
+
+ llvm::Optional<Clarification>
+ VisitAbstractConditionalOperator(const AbstractConditionalOperator *Ternary) {
+ return VisitBranchingBlock(Ternary, NeverCalledReason::IfThen);
+ }
+
+ llvm::Optional<Clarification> VisitSwitchStmt(const SwitchStmt *Switch) {
+ const Stmt *CaseToBlame = SuccInQuestion->getLabel();
+ if (!CaseToBlame) {
+      // If the interesting basic block is not labeled, it means that this
+ // basic block does not represent any of the cases.
+ return Clarification{NeverCalledReason::SwitchSkipped, Switch};
+ }
+
+ for (const SwitchCase *Case = Switch->getSwitchCaseList(); Case;
+ Case = Case->getNextSwitchCase()) {
+ if (Case == CaseToBlame) {
+ return Clarification{NeverCalledReason::Switch, Case};
+ }
+ }
+
+ llvm_unreachable("Found unexpected switch structure");
+ }
+
+ llvm::Optional<Clarification> VisitForStmt(const ForStmt *For) {
+ return VisitBranchingBlock(For, NeverCalledReason::LoopEntered);
+ }
+
+ llvm::Optional<Clarification> VisitWhileStmt(const WhileStmt *While) {
+ return VisitBranchingBlock(While, NeverCalledReason::LoopEntered);
+ }
+
+ llvm::Optional<Clarification>
+ VisitBranchingBlock(const Stmt *Terminator, NeverCalledReason DefaultReason) {
+ assert(Parent->succ_size() == 2 &&
+ "Branching block should have exactly two successors");
+ unsigned SuccessorIndex = getSuccessorIndex(Parent, SuccInQuestion);
+ NeverCalledReason ActualReason =
+ updateForSuccessor(DefaultReason, SuccessorIndex);
+ return Clarification{ActualReason, Terminator};
+ }
+
+ llvm::Optional<Clarification> VisitBinaryOperator(const BinaryOperator *) {
+    // We don't want to report on short-circuit logical operations.
+ return llvm::None;
+ }
+
+ llvm::Optional<Clarification> VisitStmt(const Stmt *Terminator) {
+    // If we got here, we didn't have a visit function for the more derived
+    // statement class that this terminator actually belongs to.
+ //
+ // This is not a good scenario and should not happen in practice, but
+ // at least we'll warn the user.
+ return Clarification{NeverCalledReason::FallbackReason, Terminator};
+ }
+
+ static unsigned getSuccessorIndex(const CFGBlock *Parent,
+ const CFGBlock *Child) {
+ CFGBlock::const_succ_iterator It = llvm::find(Parent->succs(), Child);
+ assert(It != Parent->succ_end() &&
+ "Given blocks should be in parent-child relationship");
+ return It - Parent->succ_begin();
+ }
+
+ static NeverCalledReason
+ updateForSuccessor(NeverCalledReason ReasonForTrueBranch,
+ unsigned SuccessorIndex) {
+ assert(SuccessorIndex <= 1);
+ unsigned RawReason =
+ static_cast<unsigned>(ReasonForTrueBranch) + SuccessorIndex;
+ assert(RawReason <=
+ static_cast<unsigned>(NeverCalledReason::LARGEST_VALUE));
+ return static_cast<NeverCalledReason>(RawReason);
+ }
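+
+  // Note: the arithmetic above relies on the layout of NeverCalledReason,
+  // where the reason for the false branch is assumed to immediately follow
+  // the reason for the true branch, so adding the successor index (0 for
+  // the true branch, 1 for the false one) selects the matching reason.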
+
+private:
+ NotCalledClarifier(const CFGBlock *Parent, const CFGBlock *SuccInQuestion)
+ : Parent(Parent), SuccInQuestion(SuccInQuestion) {}
+
+ const CFGBlock *Parent, *SuccInQuestion;
+};
+
+class CalledOnceChecker : public ConstStmtVisitor<CalledOnceChecker> {
+public:
+ static void check(AnalysisDeclContext &AC, CalledOnceCheckHandler &Handler,
+ bool CheckConventionalParameters) {
+ CalledOnceChecker(AC, Handler, CheckConventionalParameters).check();
+ }
+
+private:
+ CalledOnceChecker(AnalysisDeclContext &AC, CalledOnceCheckHandler &Handler,
+ bool CheckConventionalParameters)
+ : FunctionCFG(*AC.getCFG()), AC(AC), Handler(Handler),
+ CheckConventionalParameters(CheckConventionalParameters),
+ CurrentState(0) {
+ initDataStructures();
+ assert((size() == 0 || !States.empty()) &&
+ "Data structures are inconsistent");
+ }
+
+ //===----------------------------------------------------------------------===//
+ // Initializing functions
+ //===----------------------------------------------------------------------===//
+
+ void initDataStructures() {
+ const Decl *AnalyzedDecl = AC.getDecl();
+
+ if (const auto *Function = dyn_cast<FunctionDecl>(AnalyzedDecl)) {
+ findParamsToTrack(Function);
+ } else if (const auto *Method = dyn_cast<ObjCMethodDecl>(AnalyzedDecl)) {
+ findParamsToTrack(Method);
+ } else if (const auto *Block = dyn_cast<BlockDecl>(AnalyzedDecl)) {
+ findCapturesToTrack(Block);
+ findParamsToTrack(Block);
+ }
+
+ // Have something to track, let's init states for every block from the CFG.
+ if (size() != 0) {
+ States =
+ CFGSizedVector<State>(FunctionCFG.getNumBlockIDs(), State(size()));
+ }
+ }
+
+ void findCapturesToTrack(const BlockDecl *Block) {
+ for (const auto &Capture : Block->captures()) {
+ if (const auto *P = dyn_cast<ParmVarDecl>(Capture.getVariable())) {
+ // Parameter DeclContext is its owning function or method.
+ const DeclContext *ParamContext = P->getDeclContext();
+ if (shouldBeCalledOnce(ParamContext, P)) {
+ TrackedParams.push_back(P);
+ }
+ }
+ }
+ }
+
+ template <class FunctionLikeDecl>
+ void findParamsToTrack(const FunctionLikeDecl *Function) {
+ for (unsigned Index : llvm::seq<unsigned>(0u, Function->param_size())) {
+ if (shouldBeCalledOnce(Function, Index)) {
+ TrackedParams.push_back(Function->getParamDecl(Index));
+ }
+ }
+ }
+
+ //===----------------------------------------------------------------------===//
+ // Main logic 'check' functions
+ //===----------------------------------------------------------------------===//
+
+ void check() {
+ // Nothing to check here: we don't have marked parameters.
+ if (size() == 0 || isPossiblyEmptyImpl())
+ return;
+
+ assert(
+ llvm::none_of(States, [](const State &S) { return S.isVisited(); }) &&
+ "None of the blocks should be 'visited' before the analysis");
+
+    // For our task, both backward and forward approaches are well suited.
+ // However, in order to report better diagnostics, we decided to go with
+ // backward analysis.
+ //
+ // Let's consider the following CFG and how forward and backward analyses
+ // will work for it.
+ //
+ // FORWARD: | BACKWARD:
+ // #1 | #1
+ // +---------+ | +-----------+
+ // | if | | |MaybeCalled|
+ // +---------+ | +-----------+
+ // |NotCalled| | | if |
+ // +---------+ | +-----------+
+ // / \ | / \
+ // #2 / \ #3 | #2 / \ #3
+ // +----------------+ +---------+ | +----------------+ +---------+
+ // | foo() | | ... | | |DefinitelyCalled| |NotCalled|
+ // +----------------+ +---------+ | +----------------+ +---------+
+ // |DefinitelyCalled| |NotCalled| | | foo() | | ... |
+ // +----------------+ +---------+ | +----------------+ +---------+
+ // \ / | \ /
+ // \ #4 / | \ #4 /
+ // +-----------+ | +---------+
+ // | ... | | |NotCalled|
+ // +-----------+ | +---------+
+ // |MaybeCalled| | | ... |
+ // +-----------+ | +---------+
+ //
+    // The most natural way to report the lacking call in block #3 would be
+    // to say that the false branch of the if statement in block #1 doesn't
+    // have a call.  And while with the forward approach we would need to
+    // find the least common ancestor or something like that to find the
+    // 'if' to blame, backward analysis gives it to us out of the box.
+ BackwardDataflowWorklist Worklist(FunctionCFG, AC);
+
+ // Let's visit EXIT.
+ const CFGBlock *Exit = &FunctionCFG.getExit();
+ assignState(Exit, State(size(), ParameterStatus::NotCalled));
+ Worklist.enqueuePredecessors(Exit);
+
+ while (const CFGBlock *BB = Worklist.dequeue()) {
+ assert(BB && "Worklist should filter out null blocks");
+ check(BB);
+ assert(CurrentState.isVisited() &&
+ "After the check, basic block should be visited");
+
+ // Traverse successor basic blocks if the status of this block
+ // has changed.
+ if (assignState(BB, CurrentState)) {
+ Worklist.enqueuePredecessors(BB);
+ }
+ }
+
+    // Check the statuses of all tracked parameters at the last block.
+ // As we are performing a backward version of the analysis,
+ // it should be the ENTRY block.
+ checkEntry(&FunctionCFG.getEntry());
+ }
+
+ void check(const CFGBlock *BB) {
+ // We start with a state 'inherited' from all the successors.
+ CurrentState = joinSuccessors(BB);
+ assert(CurrentState.isVisited() &&
+ "Shouldn't start with a 'not visited' state");
+
+    // This is the 'exit' situation; broken promises are probably OK
+ // in such scenarios.
+ if (BB->hasNoReturnElement()) {
+ markNoReturn();
+      // This block can still have calls (even multiple calls) and
+ // for this reason there is no early return here.
+ }
+
+    // We use backward dataflow propagation and for this reason we
+ // should traverse basic blocks bottom-up.
+ for (const CFGElement &Element : llvm::reverse(*BB)) {
+ if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) {
+ check(S->getStmt());
+ }
+ }
+ }
+ void check(const Stmt *S) { Visit(S); }
+
+ void checkEntry(const CFGBlock *Entry) {
+ // We finalize this algorithm with the ENTRY block because
+ // we use a backward version of the analysis. This is where
+ // we can judge that some of the tracked parameters are not called on
+ // every path from ENTRY to EXIT.
+
+ const State &EntryStatus = getState(Entry);
+ llvm::BitVector NotCalledOnEveryPath(size(), false);
+ llvm::BitVector NotUsedOnEveryPath(size(), false);
+
+ // Check if there are no calls of the marked parameter at all
+ for (const auto &IndexedStatus : llvm::enumerate(EntryStatus)) {
+ const ParmVarDecl *Parameter = getParameter(IndexedStatus.index());
+
+ switch (IndexedStatus.value().getKind()) {
+ case ParameterStatus::NotCalled:
+        // If there were places where this parameter escapes (aka is used),
+ // we can provide a more useful diagnostic by pointing at the exact
+ // branches where it is not even mentioned.
+ if (!hasEverEscaped(IndexedStatus.index())) {
+          // This parameter was not used at all, so we should report the
+ // most generic version of the warning.
+ if (isCaptured(Parameter)) {
+ // We want to specify that it was captured by the block.
+ Handler.handleCapturedNeverCalled(Parameter, AC.getDecl(),
+ !isExplicitlyMarked(Parameter));
+ } else {
+ Handler.handleNeverCalled(Parameter,
+ !isExplicitlyMarked(Parameter));
+ }
+ } else {
+ // Mark it as 'interesting' to figure out which paths don't even
+ // have escapes.
+ NotUsedOnEveryPath[IndexedStatus.index()] = true;
+ }
+
+ break;
+ case ParameterStatus::MaybeCalled:
+        // If we have 'maybe called' at this point, we have an error:
+        // there is at least one path where this parameter
+        // is not called.
+ //
+ // However, reporting the warning with only that information can be
+ // too vague for the users. For this reason, we mark such parameters
+ // as "interesting" for further analysis.
+ NotCalledOnEveryPath[IndexedStatus.index()] = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Early exit if we don't have parameters for extra analysis.
+ if (NotCalledOnEveryPath.none() && NotUsedOnEveryPath.none())
+ return;
+
+ // We are looking for a pair of blocks A, B so that the following is true:
+ // * A is a predecessor of B
+ // * B is marked as NotCalled
+ // * A has at least one successor marked as either
+ // Escaped or DefinitelyCalled
+ //
+ // In that situation, it is guaranteed that B is the first block of the path
+    // where the user doesn't call or use the parameter in question.
+ //
+ // For this reason, branch A -> B can be used for reporting.
+ //
+ // This part of the algorithm is guarded by a condition that the function
+    // does indeed have a violation of the contract.  For this reason, we can
+ // spend more time to find a good spot to place the warning.
+ //
+ // The following algorithm has the worst case complexity of O(V + E),
+ // where V is the number of basic blocks in FunctionCFG,
+ // E is the number of edges between blocks in FunctionCFG.
+ for (const CFGBlock *BB : FunctionCFG) {
+ if (!BB)
+ continue;
+
+ const State &BlockState = getState(BB);
+
+ for (unsigned Index : llvm::seq(0u, size())) {
+ // We don't want to use 'isLosingCall' here because we want to report
+ // the following situation as well:
+ //
+ // MaybeCalled
+ // | ... |
+ // MaybeCalled NotCalled
+ //
+        // Even though the successor is not 'DefinitelyCalled', it is still
+        // useful to report it: it is still a path without a call.
+ if (NotCalledOnEveryPath[Index] &&
+ BlockState.getKindFor(Index) == ParameterStatus::MaybeCalled) {
+
+ findAndReportNotCalledBranches(BB, Index);
+ } else if (NotUsedOnEveryPath[Index] &&
+ isLosingEscape(BlockState, BB, Index)) {
+
+ findAndReportNotCalledBranches(BB, Index, /* IsEscape = */ true);
+ }
+ }
+ }
+ }
+
+ /// Check potential call of a tracked parameter.
+ void checkDirectCall(const CallExpr *Call) {
+ if (auto Index = getIndexOfCallee(Call)) {
+ processCallFor(*Index, Call);
+ }
+ }
+
+ /// Check the call expression for being an indirect call of one of the tracked
+ /// parameters. It is indirect in the sense that this particular call is not
+  /// calling the parameter itself, but rather uses it as an argument.
+ template <class CallLikeExpr>
+ void checkIndirectCall(const CallLikeExpr *CallOrMessage) {
+ // CallExpr::arguments does not interact nicely with llvm::enumerate.
+ llvm::ArrayRef<const Expr *> Arguments = llvm::makeArrayRef(
+ CallOrMessage->getArgs(), CallOrMessage->getNumArgs());
+
+ // Let's check if any of the call arguments is a point of interest.
+ for (const auto &Argument : llvm::enumerate(Arguments)) {
+ if (auto Index = getIndexOfExpression(Argument.value())) {
+ ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(*Index);
+
+ if (shouldBeCalledOnce(CallOrMessage, Argument.index())) {
+        // If the corresponding parameter is marked as 'called_once', we should
+ // consider it as a call.
+ processCallFor(*Index, CallOrMessage);
+ } else if (CurrentParamStatus.getKind() == ParameterStatus::NotCalled) {
+ // Otherwise, we mark this parameter as escaped, which can be
+ // interpreted both as called or not called depending on the context.
+ CurrentParamStatus = ParameterStatus::Escaped;
+ }
+ // Otherwise, let's keep the state as it is.
+ }
+ }
+ }
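+
+  // E.g. in a hypothetical call `dispatch_async(queue, completion)`, the
+  // tracked parameter 'completion' is used as an argument: if the callee's
+  // corresponding parameter is itself marked 'called_once', this counts as
+  // a call; otherwise 'completion' is simply considered escaped.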
+
+ /// Process call of the parameter with the given index
+ void processCallFor(unsigned Index, const Expr *Call) {
+ ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(Index);
+
+ if (CurrentParamStatus.seenAnyCalls()) {
+
+ // At this point, this parameter was called, so this is a second call.
+ const ParmVarDecl *Parameter = getParameter(Index);
+ Handler.handleDoubleCall(
+ Parameter, &CurrentState.getCallFor(Index), Call,
+ !isExplicitlyMarked(Parameter),
+ // We are sure that the second call is definitely
+ // going to happen if the status is 'DefinitelyCalled'.
+ CurrentParamStatus.getKind() == ParameterStatus::DefinitelyCalled);
+
+ // Mark this parameter as already reported on, so we don't repeat
+ // warnings.
+ CurrentParamStatus = ParameterStatus::Reported;
+
+ } else if (CurrentParamStatus.getKind() != ParameterStatus::Reported) {
+ // If we didn't report anything yet, let's mark this parameter
+ // as called.
+ ParameterStatus Called(ParameterStatus::DefinitelyCalled, Call);
+ CurrentParamStatus = Called;
+ }
+ }
+
+ void findAndReportNotCalledBranches(const CFGBlock *Parent, unsigned Index,
+ bool IsEscape = false) {
+ for (const CFGBlock *Succ : Parent->succs()) {
+ if (!Succ)
+ continue;
+
+ if (getState(Succ).getKindFor(Index) == ParameterStatus::NotCalled) {
+ assert(Parent->succ_size() >= 2 &&
+ "Block should have at least two successors at this point");
+ if (auto Clarification = NotCalledClarifier::clarify(Parent, Succ)) {
+ const ParmVarDecl *Parameter = getParameter(Index);
+ Handler.handleNeverCalled(Parameter, Clarification->Location,
+ Clarification->Reason, !IsEscape,
+ !isExplicitlyMarked(Parameter));
+ }
+ }
+ }
+ }
+
+ //===----------------------------------------------------------------------===//
+ // Predicate functions to check parameters
+ //===----------------------------------------------------------------------===//
+
+ /// Return true if parameter is explicitly marked as 'called_once'.
+ static bool isExplicitlyMarked(const ParmVarDecl *Parameter) {
+ return Parameter->hasAttr<CalledOnceAttr>();
+ }
+
+  /// Return true if the given name matches conventional patterns.
+ static bool isConventional(llvm::StringRef Name) {
+ return llvm::count(CONVENTIONAL_NAMES, Name) != 0;
+ }
+
+ /// Return true if the given name has conventional suffixes.
+ static bool hasConventionalSuffix(llvm::StringRef Name) {
+ return llvm::any_of(CONVENTIONAL_SUFFIXES, [Name](llvm::StringRef Suffix) {
+ return Name.endswith(Suffix);
+ });
+ }
+
+ /// Return true if the given type can be used for conventional parameters.
+ static bool isConventional(QualType Ty) {
+ if (!Ty->isBlockPointerType()) {
+ return false;
+ }
+
+ QualType BlockType = Ty->getAs<BlockPointerType>()->getPointeeType();
+ // Completion handlers should have a block type with void return type.
+ return BlockType->getAs<FunctionType>()->getReturnType()->isVoidType();
+ }
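+
+  // E.g. a parameter of type `void (^)(NSError *)` is conventional, while
+  // a block type returning a value, such as `BOOL (^)(void)`, is not.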
+
+ /// Return true if the only parameter of the function is conventional.
+ static bool isOnlyParameterConventional(const FunctionDecl *Function) {
+ IdentifierInfo *II = Function->getIdentifier();
+ return Function->getNumParams() == 1 && II &&
+ hasConventionalSuffix(II->getName());
+ }
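+
+  // E.g. a hypothetical function
+  //   void runWithCompletionHandler(void (^handler)(void));
+  // qualifies: it has exactly one parameter and its name ends with a
+  // conventional suffix.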
+
+  /// Return true/false if the 'swift_async' attribute states that the given
+  /// parameter is or is not conventionally called once.
+  /// Return llvm::None if the given declaration doesn't have the
+  /// 'swift_async' attribute.
+ static llvm::Optional<bool> isConventionalSwiftAsync(const Decl *D,
+ unsigned ParamIndex) {
+ if (const SwiftAsyncAttr *A = D->getAttr<SwiftAsyncAttr>()) {
+ if (A->getKind() == SwiftAsyncAttr::None) {
+ return false;
+ }
+
+ return A->getCompletionHandlerIndex().getASTIndex() == ParamIndex;
+ }
+ return llvm::None;
+ }
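+
+  // E.g. a hypothetical declaration (the attribute spelling here is assumed
+  // from the 'swift_async' documentation):
+  //   void run(void (^handler)(void))
+  //       __attribute__((swift_async(completion_handler, 1)));
+  // states that the first parameter (the attribute index is 1-based) is the
+  // completion handler, while 'swift_async(none)' opts the function out.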
+
+ /// Return true if the specified selector piece matches conventions.
+ static bool isConventionalSelectorPiece(Selector MethodSelector,
+ unsigned PieceIndex,
+ QualType PieceType) {
+ if (!isConventional(PieceType)) {
+ return false;
+ }
+
+ if (MethodSelector.getNumArgs() == 1) {
+ assert(PieceIndex == 0);
+ return hasConventionalSuffix(MethodSelector.getNameForSlot(0));
+ }
+
+ return isConventional(MethodSelector.getNameForSlot(PieceIndex));
+ }
+
+ bool shouldBeCalledOnce(const ParmVarDecl *Parameter) const {
+ return isExplicitlyMarked(Parameter) ||
+ (CheckConventionalParameters &&
+ isConventional(Parameter->getName()) &&
+ isConventional(Parameter->getType()));
+ }
+
+ bool shouldBeCalledOnce(const DeclContext *ParamContext,
+ const ParmVarDecl *Param) {
+ unsigned ParamIndex = Param->getFunctionScopeIndex();
+ if (const auto *Function = dyn_cast<FunctionDecl>(ParamContext)) {
+ return shouldBeCalledOnce(Function, ParamIndex);
+ }
+ if (const auto *Method = dyn_cast<ObjCMethodDecl>(ParamContext)) {
+ return shouldBeCalledOnce(Method, ParamIndex);
+ }
+ return shouldBeCalledOnce(Param);
+ }
+
+ bool shouldBeCalledOnce(const BlockDecl *Block, unsigned ParamIndex) const {
+ return shouldBeCalledOnce(Block->getParamDecl(ParamIndex));
+ }
+
+ bool shouldBeCalledOnce(const FunctionDecl *Function,
+ unsigned ParamIndex) const {
+ if (ParamIndex >= Function->getNumParams()) {
+ return false;
+ }
+ // 'swift_async' goes first and overrides anything else.
+ if (auto ConventionalAsync =
+ isConventionalSwiftAsync(Function, ParamIndex)) {
+ return ConventionalAsync.getValue();
+ }
+
+ return shouldBeCalledOnce(Function->getParamDecl(ParamIndex)) ||
+ (CheckConventionalParameters &&
+ isOnlyParameterConventional(Function));
+ }
+
+ bool shouldBeCalledOnce(const ObjCMethodDecl *Method,
+ unsigned ParamIndex) const {
+ Selector MethodSelector = Method->getSelector();
+ if (ParamIndex >= MethodSelector.getNumArgs()) {
+ return false;
+ }
+
+ // 'swift_async' goes first and overrides anything else.
+ if (auto ConventionalAsync = isConventionalSwiftAsync(Method, ParamIndex)) {
+ return ConventionalAsync.getValue();
+ }
+
+ const ParmVarDecl *Parameter = Method->getParamDecl(ParamIndex);
+ return shouldBeCalledOnce(Parameter) ||
+ (CheckConventionalParameters &&
+ isConventionalSelectorPiece(MethodSelector, ParamIndex,
+ Parameter->getType()));
+ }
+
+ bool shouldBeCalledOnce(const CallExpr *Call, unsigned ParamIndex) const {
+ const FunctionDecl *Function = Call->getDirectCallee();
+ return Function && shouldBeCalledOnce(Function, ParamIndex);
+ }
+
+ bool shouldBeCalledOnce(const ObjCMessageExpr *Message,
+ unsigned ParamIndex) const {
+ const ObjCMethodDecl *Method = Message->getMethodDecl();
+ return Method && ParamIndex < Method->param_size() &&
+ shouldBeCalledOnce(Method, ParamIndex);
+ }
+
+ //===----------------------------------------------------------------------===//
+ // Utility methods
+ //===----------------------------------------------------------------------===//
+
+ bool isCaptured(const ParmVarDecl *Parameter) const {
+ if (const BlockDecl *Block = dyn_cast<BlockDecl>(AC.getDecl())) {
+ return Block->capturesVariable(Parameter);
+ }
+ return false;
+ }
+
+ /// Return true if the analyzed function is actually a default implementation
+  /// of the method that has to be overridden.
+ ///
+ /// These functions can have tracked parameters, but wouldn't call them
+ /// because they are not designed to perform any meaningful actions.
+ ///
+ /// There are a couple of flavors of such default implementations:
+ /// 1. Empty methods or methods with a single return statement
+ /// 2. Methods that have one block with a call to no return function
+ /// 3. Methods with only assertion-like operations
+ bool isPossiblyEmptyImpl() const {
+ if (!isa<ObjCMethodDecl>(AC.getDecl())) {
+      // We care only about functions that are not supposed to be called:
+      // only methods can be overridden.
+ return false;
+ }
+
+ // Case #1 (without return statements)
+ if (FunctionCFG.size() == 2) {
+      // The method has only two blocks: ENTRY and EXIT.
+      // This is equivalent to an empty function.
+ return true;
+ }
+
+ // Case #2
+ if (FunctionCFG.size() == 3) {
+ const CFGBlock &Entry = FunctionCFG.getEntry();
+ if (Entry.succ_empty()) {
+ return false;
+ }
+
+ const CFGBlock *OnlyBlock = *Entry.succ_begin();
+      // The method has only one block; let's see if it has a no-return
+ // element.
+ if (OnlyBlock && OnlyBlock->hasNoReturnElement()) {
+ return true;
+ }
+ // Fallthrough, CFGs with only one block can fall into #1 and #3 as well.
+ }
+
+ // Cases #1 (return statements) and #3.
+ //
+ // It is hard to detect that something is an assertion or came
+    // from an assertion.  Here we use a simple heuristic:
+ //
+ // - If it came from a macro, it can be an assertion.
+ //
+ // Additionally, we can't assume a number of basic blocks or the CFG's
+ // structure because assertions might include loops and conditions.
+ return llvm::all_of(FunctionCFG, [](const CFGBlock *BB) {
+ if (!BB) {
+ // Unreachable blocks are totally fine.
+ return true;
+ }
+
+ // Return statements can have sub-expressions that are represented as
+ // separate statements of a basic block. We should allow this.
+ // This parent map will be initialized with a parent tree for all
+ // subexpressions of the block's return statement (if it has one).
+ std::unique_ptr<ParentMap> ReturnChildren;
+
+ return llvm::all_of(
+ llvm::reverse(*BB), // we should start with return statements, if we
+ // have any, i.e. from the bottom of the block
+ [&ReturnChildren](const CFGElement &Element) {
+ if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) {
+ const Stmt *SuspiciousStmt = S->getStmt();
+
+ if (isa<ReturnStmt>(SuspiciousStmt)) {
+ // Let's initialize this structure to test whether
+ // some further statement is a part of this return.
+ ReturnChildren = std::make_unique<ParentMap>(
+ const_cast<Stmt *>(SuspiciousStmt));
+ // Return statements are allowed as part of #1.
+ return true;
+ }
+
+ return SuspiciousStmt->getBeginLoc().isMacroID() ||
+ (ReturnChildren &&
+ ReturnChildren->hasParent(SuspiciousStmt));
+ }
+ return true;
+ });
+ });
+ }
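+
+  // E.g. a hypothetical default implementation such as
+  //   - (void)fetchWithCompletion:(void (^)(void))handler {
+  //     NSAssert(NO, @"Subclasses must override");
+  //   }
+  // consists of macro-originated statements only, so it is considered
+  // possibly empty and is not analyzed.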
+
+ /// Check if parameter with the given index has ever escaped.
+ bool hasEverEscaped(unsigned Index) const {
+ return llvm::any_of(States, [Index](const State &StateForOneBB) {
+ return StateForOneBB.getKindFor(Index) == ParameterStatus::Escaped;
+ });
+ }
+
+ /// Return status stored for the given basic block.
+ /// \{
+ State &getState(const CFGBlock *BB) {
+ assert(BB);
+ return States[BB->getBlockID()];
+ }
+ const State &getState(const CFGBlock *BB) const {
+ assert(BB);
+ return States[BB->getBlockID()];
+ }
+ /// \}
+
+ /// Assign status to the given basic block.
+ ///
+ /// Returns true when the stored status changed.
+ bool assignState(const CFGBlock *BB, const State &ToAssign) {
+ State &Current = getState(BB);
+ if (Current == ToAssign) {
+ return false;
+ }
+
+ Current = ToAssign;
+ return true;
+ }
+
+ /// Join all incoming statuses for the given basic block.
+ State joinSuccessors(const CFGBlock *BB) const {
+ auto Succs =
+ llvm::make_filter_range(BB->succs(), [this](const CFGBlock *Succ) {
+ return Succ && this->getState(Succ).isVisited();
+ });
+ // We came to this block from somewhere after all.
+ assert(!Succs.empty() &&
+ "Basic block should have at least one visited successor");
+
+ State Result = getState(*Succs.begin());
+
+ for (const CFGBlock *Succ : llvm::drop_begin(Succs, 1)) {
+ Result.join(getState(Succ));
+ }
+
+ if (const Expr *Condition = getCondition(BB->getTerminatorStmt())) {
+ handleConditional(BB, Condition, Result);
+ }
+
+ return Result;
+ }
+
+ void handleConditional(const CFGBlock *BB, const Expr *Condition,
+ State &ToAlter) const {
+ handleParameterCheck(BB, Condition, ToAlter);
+ if (SuppressOnConventionalErrorPaths) {
+ handleConventionalCheck(BB, Condition, ToAlter);
+ }
+ }
+
+ void handleParameterCheck(const CFGBlock *BB, const Expr *Condition,
+ State &ToAlter) const {
+ // In this function, we try to deal with the following pattern:
+ //
+ // if (parameter)
+ // parameter(...);
+ //
+ // It's not good to show a warning here because clearly 'parameter'
+ // couldn't and shouldn't be called on the 'else' path.
+ //
+ // Let's check if this if statement has a check involving one of
+ // the tracked parameters.
+ if (const ParmVarDecl *Parameter = findReferencedParmVarDecl(
+ Condition,
+ /* ShouldRetrieveFromComparisons = */ true)) {
+ if (const auto Index = getIndex(*Parameter)) {
+ ParameterStatus &CurrentStatus = ToAlter.getStatusFor(*Index);
+
+        // We don't want to deep dive into the semantics of the check and
+ // figure out if that check was for null or something else.
+ // We simply trust the user that they know what they are doing.
+ //
+ // For this reason, in the following loop we look for the
+ // best-looking option.
+ for (const CFGBlock *Succ : BB->succs()) {
+ if (!Succ)
+ continue;
+
+ const ParameterStatus &StatusInSucc =
+ getState(Succ).getStatusFor(*Index);
+
+ if (StatusInSucc.isErrorStatus()) {
+ continue;
+ }
+
+ // Let's use this status instead.
+ CurrentStatus = StatusInSucc;
+
+ if (StatusInSucc.getKind() == ParameterStatus::DefinitelyCalled) {
+ // This is the best option to have and we already found it.
+ break;
+ }
+
+ // If we found 'Escaped' first, we still might find 'DefinitelyCalled'
+ // on the other branch. And we prefer the latter.
+ }
+ }
+ }
+ }
+
+ void handleConventionalCheck(const CFGBlock *BB, const Expr *Condition,
+ State &ToAlter) const {
+ // Even when the analysis is technically correct, it is a widespread pattern
+ // not to call completion handlers in some scenarios. These usually have
+ // typical conditional names, such as 'error' or 'cancel'.
+ if (!mentionsAnyOfConventionalNames(Condition)) {
+ return;
+ }
+
+ for (const auto &IndexedStatus : llvm::enumerate(ToAlter)) {
+ const ParmVarDecl *Parameter = getParameter(IndexedStatus.index());
+ // Conventions do not apply to explicitly marked parameters.
+ if (isExplicitlyMarked(Parameter)) {
+ continue;
+ }
+
+ ParameterStatus &CurrentStatus = IndexedStatus.value();
+      // If we did find that the user uses the callback on one of the branches
+      // and doesn't on the other, we believe that they know what they are
+ // doing and trust them.
+ //
+ // There are two possible scenarios for that:
+ // 1. Current status is 'MaybeCalled' and one of the branches is
+ // 'DefinitelyCalled'
+ // 2. Current status is 'NotCalled' and one of the branches is 'Escaped'
+ if (isLosingCall(ToAlter, BB, IndexedStatus.index()) ||
+ isLosingEscape(ToAlter, BB, IndexedStatus.index())) {
+ CurrentStatus = ParameterStatus::Escaped;
+ }
+ }
+ }
+
+ bool isLosingCall(const State &StateAfterJoin, const CFGBlock *JoinBlock,
+ unsigned ParameterIndex) const {
+ // Let's check if the block represents DefinitelyCalled -> MaybeCalled
+ // transition.
+ return isLosingJoin(StateAfterJoin, JoinBlock, ParameterIndex,
+ ParameterStatus::MaybeCalled,
+ ParameterStatus::DefinitelyCalled);
+ }
+
+ bool isLosingEscape(const State &StateAfterJoin, const CFGBlock *JoinBlock,
+ unsigned ParameterIndex) const {
+ // Let's check if the block represents Escaped -> NotCalled transition.
+ return isLosingJoin(StateAfterJoin, JoinBlock, ParameterIndex,
+ ParameterStatus::NotCalled, ParameterStatus::Escaped);
+ }
+
+ bool isLosingJoin(const State &StateAfterJoin, const CFGBlock *JoinBlock,
+ unsigned ParameterIndex, ParameterStatus::Kind AfterJoin,
+ ParameterStatus::Kind BeforeJoin) const {
+ assert(!ParameterStatus::isErrorStatus(BeforeJoin) &&
+ ParameterStatus::isErrorStatus(AfterJoin) &&
+ "It's not a losing join if statuses do not represent "
+ "correct-to-error transition");
+
+ const ParameterStatus &CurrentStatus =
+ StateAfterJoin.getStatusFor(ParameterIndex);
+
+ return CurrentStatus.getKind() == AfterJoin &&
+ anySuccessorHasStatus(JoinBlock, ParameterIndex, BeforeJoin);
+ }
+
+ /// Return true if any of the successors of the given basic block has
+ /// a specified status for the given parameter.
+ bool anySuccessorHasStatus(const CFGBlock *Parent, unsigned ParameterIndex,
+ ParameterStatus::Kind ToFind) const {
+ return llvm::any_of(
+ Parent->succs(), [this, ParameterIndex, ToFind](const CFGBlock *Succ) {
+ return Succ && getState(Succ).getKindFor(ParameterIndex) == ToFind;
+ });
+ }
+
+ /// Check given expression that was discovered to escape.
+ void checkEscapee(const Expr *E) {
+ if (const ParmVarDecl *Parameter = findReferencedParmVarDecl(E)) {
+ checkEscapee(*Parameter);
+ }
+ }
+
+ /// Check given parameter that was discovered to escape.
+ void checkEscapee(const ParmVarDecl &Parameter) {
+ if (auto Index = getIndex(Parameter)) {
+ ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(*Index);
+
+ if (CurrentParamStatus.getKind() == ParameterStatus::NotCalled) {
+ CurrentParamStatus = ParameterStatus::Escaped;
+ }
+ }
+ }
+
+ /// Mark all parameters in the current state as 'no-return'.
+ void markNoReturn() {
+ for (ParameterStatus &PS : CurrentState) {
+ PS = ParameterStatus::NoReturn;
+ }
+ }
+
+ /// Check if the given assignment represents suppression and act on it.
+ void checkSuppression(const BinaryOperator *Assignment) {
+ // Suppression has the following form:
+ // parameter = 0;
+ // 0 can be of any form (NULL, nil, etc.)
+ if (auto Index = getIndexOfExpression(Assignment->getLHS())) {
+
+      // We don't care what is written in the RHS; it could be whatever
+ // we can interpret as 0.
+ if (auto Constant =
+ Assignment->getRHS()->IgnoreParenCasts()->getIntegerConstantExpr(
+ AC.getASTContext())) {
+
+ ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(*Index);
+
+ if (0 == *Constant && CurrentParamStatus.seenAnyCalls()) {
+ // Even though this suppression mechanism is introduced to tackle
+ // false positives for multiple calls, the fact that the user has
+ // to use suppression can also tell us that we couldn't figure out
+ // how different paths cancel each other out. And if that is true,
+ // we will most certainly have false positives about parameters not
+ // being called on certain paths.
+ //
+ // For this reason, we abandon tracking this parameter altogether.
+ CurrentParamStatus = ParameterStatus::Reported;
+ }
+ }
+ }
+ }
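+
+  // E.g. a hypothetical sequence
+  //   handler();
+  //   handler = nil;
+  // stops tracking 'handler' altogether, so a later call to it is not
+  // reported as a double call.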
+
+public:
+ //===----------------------------------------------------------------------===//
+ // Tree traversal methods
+ //===----------------------------------------------------------------------===//
+
+ void VisitCallExpr(const CallExpr *Call) {
+ // This call might be a direct call, i.e. a parameter call...
+ checkDirectCall(Call);
+ // ... or an indirect call, i.e. when parameter is an argument.
+ checkIndirectCall(Call);
+ }
+
+ void VisitObjCMessageExpr(const ObjCMessageExpr *Message) {
+ // The most common situation that we are defending against here is
+ // copying a tracked parameter.
+ if (const Expr *Receiver = Message->getInstanceReceiver()) {
+ checkEscapee(Receiver);
+ }
+    // Message expressions, unlike calls, cannot be direct.
+ checkIndirectCall(Message);
+ }
+
+ void VisitBlockExpr(const BlockExpr *Block) {
+ for (const auto &Capture : Block->getBlockDecl()->captures()) {
+ // If a block captures a tracked parameter, it should be
+ // considered escaped.
+      // On one hand, a block that captures the parameter should definitely
+      // call it on every path. However, it is not guaranteed that the block
+      // itself gets called whenever it gets created.
+ //
+ // Because we don't want to track blocks and whether they get called,
+ // we consider such parameters simply escaped.
+ if (const auto *Param = dyn_cast<ParmVarDecl>(Capture.getVariable())) {
+ checkEscapee(*Param);
+ }
+ }
+ }
+
+ void VisitBinaryOperator(const BinaryOperator *Op) {
+ if (Op->getOpcode() == clang::BO_Assign) {
+      // Let's check whether one of the tracked parameters is assigned into
+      // something else. We don't want to track extra variables, so we
+      // consider such a parameter an escapee.
+ checkEscapee(Op->getRHS());
+
+ // Let's check whether this assignment is a suppression.
+ checkSuppression(Op);
+ }
+ }
+
+ void VisitDeclStmt(const DeclStmt *DS) {
+ // Variable initialization is not assignment and should be handled
+ // separately.
+ //
+    // Multiple declarations can be part of a single declaration statement.
+ for (const auto *Declaration : DS->getDeclGroup()) {
+ if (const auto *Var = dyn_cast<VarDecl>(Declaration)) {
+ if (Var->getInit()) {
+ checkEscapee(Var->getInit());
+ }
+ }
+ }
+ }
+
+ void VisitCStyleCastExpr(const CStyleCastExpr *Cast) {
+ // We consider '(void)parameter' as a manual no-op escape.
+ // It should be used to explicitly tell the analysis that this parameter
+ // is intentionally not called on this path.
+ if (Cast->getType().getCanonicalType()->isVoidType()) {
+ checkEscapee(Cast->getSubExpr());
+ }
+ }
+
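A minimal sketch of the opt-out recognized here (names invented): the cast to void tells the analysis that omitting the call on this path is deliberate.

  void finish(int aborted, void (*callback)(void)) {
    if (aborted) {
      (void)callback; // deliberately not called on this path
      return;
    }
    callback();
  }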
+ void VisitObjCAtThrowStmt(const ObjCAtThrowStmt *) {
+ // It is OK not to call marked parameters on exceptional paths.
+ markNoReturn();
+ }
+
+private:
+ unsigned size() const { return TrackedParams.size(); }
+
+ llvm::Optional<unsigned> getIndexOfCallee(const CallExpr *Call) const {
+ return getIndexOfExpression(Call->getCallee());
+ }
+
+ llvm::Optional<unsigned> getIndexOfExpression(const Expr *E) const {
+ if (const ParmVarDecl *Parameter = findReferencedParmVarDecl(E)) {
+ return getIndex(*Parameter);
+ }
+
+ return llvm::None;
+ }
+
+ llvm::Optional<unsigned> getIndex(const ParmVarDecl &Parameter) const {
+    // The expected number of parameters that we actually track is 1.
+    //
+    // Also, the number of declared parameters is never going to be on the
+    // order of hundreds of thousands.
+ //
+ // In this setting, linear search seems reasonable and even performs better
+ // than bisection.
+ ParamSizedVector<const ParmVarDecl *>::const_iterator It =
+ llvm::find(TrackedParams, &Parameter);
+
+ if (It != TrackedParams.end()) {
+ return It - TrackedParams.begin();
+ }
+
+ return llvm::None;
+ }
+
+ const ParmVarDecl *getParameter(unsigned Index) const {
+ assert(Index < TrackedParams.size());
+ return TrackedParams[Index];
+ }
+
+ const CFG &FunctionCFG;
+ AnalysisDeclContext &AC;
+ CalledOnceCheckHandler &Handler;
+ bool CheckConventionalParameters;
+  // As of now, we turn this behavior off, so we still report missing calls
+  // on paths where skipping the call looks intentional.
+  // Technically such reports are true positives, but they can make some users
+  // grumpy because of the sheer number of warnings.
+  // This can be turned back on if we decide we want the opposite trade-off.
+ bool SuppressOnConventionalErrorPaths = false;
+
+ State CurrentState;
+ ParamSizedVector<const ParmVarDecl *> TrackedParams;
+ CFGSizedVector<State> States;
+};
+
+} // end anonymous namespace
+
+namespace clang {
+void checkCalledOnceParameters(AnalysisDeclContext &AC,
+ CalledOnceCheckHandler &Handler,
+ bool CheckConventionalParameters) {
+ CalledOnceChecker::check(AC, Handler, CheckConventionalParameters);
+}
+} // end namespace clang
diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index 2f80285f17b4..e9ff5e5e8765 100644
--- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -6,7 +6,10 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ExprMutationAnalyzer.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
@@ -24,11 +27,11 @@ AST_MATCHER_P(CXXForRangeStmt, hasRangeStmt,
return InnerMatcher.matches(*Range, Finder, Builder);
}
-AST_MATCHER_P(Expr, maybeEvalCommaExpr,
- ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
- const Expr* Result = &Node;
+AST_MATCHER_P(Expr, maybeEvalCommaExpr, ast_matchers::internal::Matcher<Expr>,
+ InnerMatcher) {
+ const Expr *Result = &Node;
while (const auto *BOComma =
- dyn_cast_or_null<BinaryOperator>(Result->IgnoreParens())) {
+ dyn_cast_or_null<BinaryOperator>(Result->IgnoreParens())) {
if (!BOComma->isCommaOp())
break;
Result = BOComma->getRHS();
@@ -36,6 +39,55 @@ AST_MATCHER_P(Expr, maybeEvalCommaExpr,
return InnerMatcher.matches(*Result, Finder, Builder);
}
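As an aside, a tiny hypothetical example of what maybeEvalCommaExpr resolves: in C++ a parenthesized comma expression yields its last operand as an lvalue, so the assignment below really targets 'b'.

  void f() {
    int a = 0, b = 0;
    (a, b) = 1; // the comma chain resolves to 'b', which is mutated
  }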
+AST_MATCHER_P(Expr, canResolveToExpr, ast_matchers::internal::Matcher<Expr>,
+ InnerMatcher) {
+ auto DerivedToBase = [](const ast_matchers::internal::Matcher<Expr> &Inner) {
+ return implicitCastExpr(anyOf(hasCastKind(CK_DerivedToBase),
+ hasCastKind(CK_UncheckedDerivedToBase)),
+ hasSourceExpression(Inner));
+ };
+ auto IgnoreDerivedToBase =
+ [&DerivedToBase](const ast_matchers::internal::Matcher<Expr> &Inner) {
+ return ignoringParens(expr(anyOf(Inner, DerivedToBase(Inner))));
+ };
+
+ // The 'ConditionalOperator' matches on `<anything> ? <expr> : <expr>`.
+ // This matching must be recursive because `<expr>` can be anything resolving
+ // to the `InnerMatcher`, for example another conditional operator.
+ // The edge-case `BaseClass &b = <cond> ? DerivedVar1 : DerivedVar2;`
+ // is handled, too. The implicit cast happens outside of the conditional.
+ // This is matched by `IgnoreDerivedToBase(canResolveToExpr(InnerMatcher))`
+ // below.
+ auto const ConditionalOperator = conditionalOperator(anyOf(
+ hasTrueExpression(ignoringParens(canResolveToExpr(InnerMatcher))),
+ hasFalseExpression(ignoringParens(canResolveToExpr(InnerMatcher)))));
+ auto const ElvisOperator = binaryConditionalOperator(anyOf(
+ hasTrueExpression(ignoringParens(canResolveToExpr(InnerMatcher))),
+ hasFalseExpression(ignoringParens(canResolveToExpr(InnerMatcher)))));
+
+ auto const ComplexMatcher = ignoringParens(
+ expr(anyOf(IgnoreDerivedToBase(InnerMatcher),
+ maybeEvalCommaExpr(IgnoreDerivedToBase(InnerMatcher)),
+ IgnoreDerivedToBase(ConditionalOperator),
+ IgnoreDerivedToBase(ElvisOperator))));
+
+ return ComplexMatcher.matches(Node, Finder, Builder);
+}
+
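The edge case mentioned in the comment above, spelled out as a hypothetical snippet: the derived-to-base cast sits outside the conditional operator, so the matcher has to look through it before descending into either branch.

  struct Base {};
  struct Derived : Base {};

  void g(bool cond) {
    Derived d1, d2;
    Base &b = cond ? d1 : d2; // both arms resolve through the outer cast
    (void)b;
  }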
+// Similar to 'hasAnyArgument', which cannot be used here because
+// 'InitListExpr' does not have an 'arguments()' method.
+AST_MATCHER_P(InitListExpr, hasAnyInit, ast_matchers::internal::Matcher<Expr>,
+ InnerMatcher) {
+ for (const Expr *Arg : Node.inits()) {
+ ast_matchers::internal::BoundNodesTreeBuilder Result(*Builder);
+ if (InnerMatcher.matches(*Arg, Finder, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ }
+ return false;
+}
+
const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt, CXXTypeidExpr>
cxxTypeidExpr;
@@ -43,10 +95,6 @@ AST_MATCHER(CXXTypeidExpr, isPotentiallyEvaluated) {
return Node.isPotentiallyEvaluated();
}
-const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt,
- GenericSelectionExpr>
- genericSelectionExpr;
-
AST_MATCHER_P(GenericSelectionExpr, hasControllingExpr,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.getControllingExpr(), Finder, Builder);
@@ -151,7 +199,7 @@ bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
NodeID<Expr>::value,
match(
findAll(
- expr(equalsNode(Exp),
+ expr(canResolveToExpr(equalsNode(Exp)),
anyOf(
// `Exp` is part of the underlying expression of
// decltype/typeof if it has an ancestor of
@@ -202,29 +250,43 @@ const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// LHS of any assignment operators.
const auto AsAssignmentLhs = binaryOperator(
- isAssignmentOperator(),
- hasLHS(maybeEvalCommaExpr(ignoringParenImpCasts(equalsNode(Exp)))));
+ isAssignmentOperator(), hasLHS(canResolveToExpr(equalsNode(Exp))));
// Operand of increment/decrement operators.
const auto AsIncDecOperand =
unaryOperator(anyOf(hasOperatorName("++"), hasOperatorName("--")),
- hasUnaryOperand(maybeEvalCommaExpr(
- ignoringParenImpCasts(equalsNode(Exp)))));
+ hasUnaryOperand(canResolveToExpr(equalsNode(Exp))));
// Invoking non-const member function.
// A member function is assumed to be non-const when it is unresolved.
const auto NonConstMethod = cxxMethodDecl(unless(isConst()));
- const auto AsNonConstThis =
- expr(anyOf(cxxMemberCallExpr(callee(NonConstMethod),
- on(maybeEvalCommaExpr(equalsNode(Exp)))),
- cxxOperatorCallExpr(callee(NonConstMethod),
- hasArgument(0,
- maybeEvalCommaExpr(equalsNode(Exp)))),
- callExpr(callee(expr(anyOf(
- unresolvedMemberExpr(
- hasObjectExpression(maybeEvalCommaExpr(equalsNode(Exp)))),
- cxxDependentScopeMemberExpr(
- hasObjectExpression(maybeEvalCommaExpr(equalsNode(Exp))))))))));
+
+ const auto AsNonConstThis = expr(anyOf(
+ cxxMemberCallExpr(callee(NonConstMethod),
+ on(canResolveToExpr(equalsNode(Exp)))),
+ cxxOperatorCallExpr(callee(NonConstMethod),
+ hasArgument(0, canResolveToExpr(equalsNode(Exp)))),
+      // In case of a templated type, a call to an overloaded operator is not
+      // resolved and is modeled as a `binaryOperator` on a dependent type.
+      // Such instances are considered a modification, because they can modify
+      // the operand in some instantiations of the template.
+ binaryOperator(hasEitherOperand(
+ allOf(ignoringImpCasts(canResolveToExpr(equalsNode(Exp))),
+ isTypeDependent()))),
+ // Within class templates and member functions the member expression might
+ // not be resolved. In that case, the `callExpr` is considered to be a
+ // modification.
+ callExpr(
+ callee(expr(anyOf(unresolvedMemberExpr(hasObjectExpression(
+ canResolveToExpr(equalsNode(Exp)))),
+ cxxDependentScopeMemberExpr(hasObjectExpression(
+ canResolveToExpr(equalsNode(Exp)))))))),
+ // Match on a call to a known method, but the call itself is type
+      // dependent (e.g. `vector<T> v; v.push_back(T{});` in a templated
+      // function).
+ callExpr(allOf(isTypeDependent(),
+ callee(memberExpr(hasDeclaration(NonConstMethod),
+ hasObjectExpression(canResolveToExpr(
+ equalsNode(Exp)))))))));
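A minimal illustration of the dependent-operator case above (invented names): inside a template, the compound assignment stays an unresolved binaryOperator, and the analyzer conservatively counts it as a mutation.

  template <typename T>
  void bump(T &x) {
    x += 1; // dependent type: unresolved, treated as a modification
  }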
// Taking address of 'Exp'.
// We're assuming 'Exp' is mutated as soon as its address is taken, though in
@@ -234,38 +296,51 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
unaryOperator(hasOperatorName("&"),
// A NoOp implicit cast is adding const.
unless(hasParent(implicitCastExpr(hasCastKind(CK_NoOp)))),
- hasUnaryOperand(maybeEvalCommaExpr(equalsNode(Exp))));
+ hasUnaryOperand(canResolveToExpr(equalsNode(Exp))));
const auto AsPointerFromArrayDecay =
castExpr(hasCastKind(CK_ArrayToPointerDecay),
unless(hasParent(arraySubscriptExpr())),
- has(maybeEvalCommaExpr(equalsNode(Exp))));
+ has(canResolveToExpr(equalsNode(Exp))));
// Treat calling `operator->()` of move-only classes as taking address.
// These are typically smart pointers with unique ownership so we treat
// mutation of pointee as mutation of the smart pointer itself.
- const auto AsOperatorArrowThis =
- cxxOperatorCallExpr(hasOverloadedOperatorName("->"),
- callee(cxxMethodDecl(ofClass(isMoveOnly()),
- returns(nonConstPointerType()))),
- argumentCountIs(1),
- hasArgument(0, maybeEvalCommaExpr(equalsNode(Exp))));
+ const auto AsOperatorArrowThis = cxxOperatorCallExpr(
+ hasOverloadedOperatorName("->"),
+ callee(
+ cxxMethodDecl(ofClass(isMoveOnly()), returns(nonConstPointerType()))),
+ argumentCountIs(1), hasArgument(0, canResolveToExpr(equalsNode(Exp))));
// Used as non-const-ref argument when calling a function.
// An argument is assumed to be non-const-ref when the function is unresolved.
// Instantiated template functions are not handled here but in
// findFunctionArgMutation which has additional smarts for handling forwarding
// references.
- const auto NonConstRefParam = forEachArgumentWithParam(
- maybeEvalCommaExpr(equalsNode(Exp)),
- parmVarDecl(hasType(nonConstReferenceType())));
+ const auto NonConstRefParam = forEachArgumentWithParamType(
+ anyOf(canResolveToExpr(equalsNode(Exp)),
+ memberExpr(hasObjectExpression(canResolveToExpr(equalsNode(Exp))))),
+ nonConstReferenceType());
const auto NotInstantiated = unless(hasDeclaration(isInstantiated()));
+ const auto TypeDependentCallee =
+ callee(expr(anyOf(unresolvedLookupExpr(), unresolvedMemberExpr(),
+ cxxDependentScopeMemberExpr(),
+ hasType(templateTypeParmType()), isTypeDependent())));
+
const auto AsNonConstRefArg = anyOf(
callExpr(NonConstRefParam, NotInstantiated),
cxxConstructExpr(NonConstRefParam, NotInstantiated),
- callExpr(callee(expr(anyOf(unresolvedLookupExpr(), unresolvedMemberExpr(),
- cxxDependentScopeMemberExpr(),
- hasType(templateTypeParmType())))),
- hasAnyArgument(maybeEvalCommaExpr(equalsNode(Exp)))),
- cxxUnresolvedConstructExpr(hasAnyArgument(maybeEvalCommaExpr(equalsNode(Exp)))));
+ callExpr(TypeDependentCallee,
+ hasAnyArgument(canResolveToExpr(equalsNode(Exp)))),
+ cxxUnresolvedConstructExpr(
+ hasAnyArgument(canResolveToExpr(equalsNode(Exp)))),
+      // Previously a false positive in the following code:
+      // `template <typename T> void f() { int i = 42; new Type<T>(i); }`
+      // where the constructor of `Type` takes its argument by reference.
+      // The AST does not resolve this to a `cxxConstructExpr` because it is
+      // type-dependent.
+ parenListExpr(hasDescendant(expr(canResolveToExpr(equalsNode(Exp))))),
+ // If the initializer is for a reference type, there is no cast for
+ // the variable. Values are cast to RValue first.
+ initListExpr(hasAnyInit(expr(canResolveToExpr(equalsNode(Exp))))));
// Captured by a lambda by reference.
// If we're initializing a capture with 'Exp' directly then we're initializing
@@ -278,17 +353,22 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// For returning by value there will be an ImplicitCastExpr <LValueToRValue>.
// For returning by const-ref there will be an ImplicitCastExpr <NoOp> (for
// adding const.)
- const auto AsNonConstRefReturn = returnStmt(hasReturnValue(
- maybeEvalCommaExpr(equalsNode(Exp))));
+ const auto AsNonConstRefReturn =
+ returnStmt(hasReturnValue(canResolveToExpr(equalsNode(Exp))));
+
+  // It is used as a non-const reference for initializing a range-for loop.
+ const auto AsNonConstRefRangeInit = cxxForRangeStmt(
+ hasRangeInit(declRefExpr(allOf(canResolveToExpr(equalsNode(Exp)),
+ hasType(nonConstReferenceType())))));
const auto Matches = match(
- traverse(
- ast_type_traits::TK_AsIs,
- findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
- AsAmpersandOperand, AsPointerFromArrayDecay,
- AsOperatorArrowThis, AsNonConstRefArg,
- AsLambdaRefCaptureInit, AsNonConstRefReturn))
- .bind("stmt"))),
+ traverse(TK_AsIs,
+ findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand,
+ AsNonConstThis, AsAmpersandOperand,
+ AsPointerFromArrayDecay, AsOperatorArrowThis,
+ AsNonConstRefArg, AsLambdaRefCaptureInit,
+ AsNonConstRefReturn, AsNonConstRefRangeInit))
+ .bind("stmt"))),
Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
@@ -296,9 +376,10 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) {
// Check whether any member of 'Exp' is mutated.
const auto MemberExprs =
- match(findAll(expr(anyOf(memberExpr(hasObjectExpression(equalsNode(Exp))),
- cxxDependentScopeMemberExpr(
- hasObjectExpression(equalsNode(Exp)))))
+ match(findAll(expr(anyOf(memberExpr(hasObjectExpression(
+ canResolveToExpr(equalsNode(Exp)))),
+ cxxDependentScopeMemberExpr(hasObjectExpression(
+ canResolveToExpr(equalsNode(Exp))))))
.bind(NodeID<Expr>::value)),
Stm, Context);
return findExprMutation(MemberExprs);
@@ -306,43 +387,112 @@ const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) {
const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) {
// Check whether any element of an array is mutated.
- const auto SubscriptExprs = match(
- findAll(arraySubscriptExpr(hasBase(ignoringImpCasts(equalsNode(Exp))))
- .bind(NodeID<Expr>::value)),
- Stm, Context);
+ const auto SubscriptExprs =
+ match(findAll(arraySubscriptExpr(
+ anyOf(hasBase(canResolveToExpr(equalsNode(Exp))),
+ hasBase(implicitCastExpr(
+ allOf(hasCastKind(CK_ArrayToPointerDecay),
+ hasSourceExpression(canResolveToExpr(
+ equalsNode(Exp))))))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
return findExprMutation(SubscriptExprs);
}
const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
+ // If the 'Exp' is explicitly casted to a non-const reference type the
+ // 'Exp' is considered to be modified.
+ const auto ExplicitCast = match(
+ findAll(
+ stmt(castExpr(hasSourceExpression(canResolveToExpr(equalsNode(Exp))),
+ explicitCastExpr(
+ hasDestinationType(nonConstReferenceType()))))
+ .bind("stmt")),
+ Stm, Context);
+
+ if (const auto *CastStmt = selectFirst<Stmt>("stmt", ExplicitCast))
+ return CastStmt;
+
// If 'Exp' is casted to any non-const reference type, check the castExpr.
- const auto Casts =
- match(findAll(castExpr(hasSourceExpression(equalsNode(Exp)),
- anyOf(explicitCastExpr(hasDestinationType(
- nonConstReferenceType())),
- implicitCastExpr(hasImplicitDestinationType(
- nonConstReferenceType()))))
- .bind(NodeID<Expr>::value)),
- Stm, Context);
+ const auto Casts = match(
+ findAll(
+ expr(castExpr(hasSourceExpression(canResolveToExpr(equalsNode(Exp))),
+ anyOf(explicitCastExpr(
+ hasDestinationType(nonConstReferenceType())),
+ implicitCastExpr(hasImplicitDestinationType(
+ nonConstReferenceType())))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
+
if (const Stmt *S = findExprMutation(Casts))
return S;
// Treat std::{move,forward} as cast.
const auto Calls =
match(findAll(callExpr(callee(namedDecl(
hasAnyName("::std::move", "::std::forward"))),
- hasArgument(0, equalsNode(Exp)))
+ hasArgument(0, canResolveToExpr(equalsNode(Exp))))
.bind("expr")),
Stm, Context);
return findExprMutation(Calls);
}
const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
+  // Keep this ordering so that the more specific initialization matches
+  // happen first, because they are cheaper than matching all potential
+  // modifications of the loop variable.
+
+ // The range variable is a reference to a builtin array. In that case the
+ // array is considered modified if the loop-variable is a non-const reference.
+ const auto DeclStmtToNonRefToArray = declStmt(hasSingleDecl(varDecl(hasType(
+ hasUnqualifiedDesugaredType(referenceType(pointee(arrayType())))))));
+ const auto RefToArrayRefToElements = match(
+ findAll(stmt(cxxForRangeStmt(
+ hasLoopVariable(varDecl(hasType(nonConstReferenceType()))
+ .bind(NodeID<Decl>::value)),
+ hasRangeStmt(DeclStmtToNonRefToArray),
+ hasRangeInit(canResolveToExpr(equalsNode(Exp)))))
+ .bind("stmt")),
+ Stm, Context);
+
+ if (const auto *BadRangeInitFromArray =
+ selectFirst<Stmt>("stmt", RefToArrayRefToElements))
+ return BadRangeInitFromArray;
+
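For example, a hypothetical loop of this shape is what the match above flags: the loop variable is a non-const reference, so every element of the underlying array may be written.

  void zeroOut() {
    int numbers[3] = {1, 2, 3};
    for (int &n : numbers)
      n = 0; // writes through the reference into the array
  }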
+ // Small helper to match special cases in range-for loops.
+ //
+  // It is possible that containers do not provide a const overload for their
+  // iterator accessors. If this is the case, the variable is used non-const
+  // no matter what happens in the loop. Detecting this directly is faster
+  // than finding all mutations of the loop variable, and it also targets a
+  // different kind of modification.
+ const auto HasAnyNonConstIterator =
+ anyOf(allOf(hasMethod(allOf(hasName("begin"), unless(isConst()))),
+ unless(hasMethod(allOf(hasName("begin"), isConst())))),
+ allOf(hasMethod(allOf(hasName("end"), unless(isConst()))),
+ unless(hasMethod(allOf(hasName("end"), isConst())))));
+
+ const auto DeclStmtToNonConstIteratorContainer = declStmt(
+ hasSingleDecl(varDecl(hasType(hasUnqualifiedDesugaredType(referenceType(
+ pointee(hasDeclaration(cxxRecordDecl(HasAnyNonConstIterator)))))))));
+
+ const auto RefToContainerBadIterators =
+ match(findAll(stmt(cxxForRangeStmt(allOf(
+ hasRangeStmt(DeclStmtToNonConstIteratorContainer),
+ hasRangeInit(canResolveToExpr(equalsNode(Exp))))))
+ .bind("stmt")),
+ Stm, Context);
+
+ if (const auto *BadIteratorsContainer =
+ selectFirst<Stmt>("stmt", RefToContainerBadIterators))
+ return BadIteratorsContainer;
+
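A sketch of the container situation described above (invented type): begin() and end() have no const overloads, so even a read-only loop has to use the range as non-const.

  struct Bag {
    int Data[4] = {1, 2, 3, 4};
    int *begin() { return Data; }   // no const overload
    int *end() { return Data + 4; } // no const overload
  };

  int sum(Bag &bag) {
    int total = 0;
    for (int v : bag) // read-only body, yet 'bag' counts as mutated
      total += v;
    return total;
  }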
// If range for looping over 'Exp' with a non-const reference loop variable,
// check all declRefExpr of the loop variable.
const auto LoopVars =
match(findAll(cxxForRangeStmt(
hasLoopVariable(varDecl(hasType(nonConstReferenceType()))
.bind(NodeID<Decl>::value)),
- hasRangeInit(equalsNode(Exp)))),
+ hasRangeInit(canResolveToExpr(equalsNode(Exp))))),
Stm, Context);
return findDeclMutation(LoopVars);
}
@@ -356,7 +506,8 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
hasOverloadedOperatorName("*"),
callee(cxxMethodDecl(ofClass(isMoveOnly()),
returns(nonConstReferenceType()))),
- argumentCountIs(1), hasArgument(0, equalsNode(Exp)))
+ argumentCountIs(1),
+ hasArgument(0, canResolveToExpr(equalsNode(Exp))))
.bind(NodeID<Expr>::value)),
Stm, Context);
if (const Stmt *S = findExprMutation(Ref))
@@ -367,13 +518,12 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
stmt(forEachDescendant(
varDecl(
hasType(nonConstReferenceType()),
- hasInitializer(anyOf(equalsNode(Exp),
- conditionalOperator(anyOf(
- hasTrueExpression(equalsNode(Exp)),
- hasFalseExpression(equalsNode(Exp)))))),
+ hasInitializer(anyOf(canResolveToExpr(equalsNode(Exp)),
+ memberExpr(hasObjectExpression(
+ canResolveToExpr(equalsNode(Exp)))))),
hasParent(declStmt().bind("stmt")),
- // Don't follow the reference in range statement, we've handled
- // that separately.
+ // Don't follow the reference in range statement, we've
+ // handled that separately.
unless(hasParent(declStmt(hasParent(
cxxForRangeStmt(hasRangeStmt(equalsBoundNode("stmt"))))))))
.bind(NodeID<Decl>::value))),
@@ -383,13 +533,13 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
const auto NonConstRefParam = forEachArgumentWithParam(
- equalsNode(Exp),
+ canResolveToExpr(equalsNode(Exp)),
parmVarDecl(hasType(nonConstReferenceType())).bind("parm"));
const auto IsInstantiated = hasDeclaration(isInstantiated());
const auto FuncDecl = hasDeclaration(functionDecl().bind("func"));
const auto Matches = match(
traverse(
- ast_type_traits::TK_AsIs,
+ TK_AsIs,
findAll(
expr(anyOf(callExpr(NonConstRefParam, IsInstantiated, FuncDecl,
unless(callee(namedDecl(hasAnyName(
diff --git a/clang/lib/StaticAnalyzer/Core/IssueHash.cpp b/clang/lib/Analysis/IssueHash.cpp
index e7497f3fbdaa..94816747668d 100644
--- a/clang/lib/StaticAnalyzer/Core/IssueHash.cpp
+++ b/clang/lib/Analysis/IssueHash.cpp
@@ -5,7 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/IssueHash.h"
+
+#include "clang/Analysis/IssueHash.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -120,7 +121,8 @@ static std::string GetEnclosingDeclContextSignature(const Decl *D) {
return "";
}
-static StringRef GetNthLineOfFile(const llvm::MemoryBuffer *Buffer, int Line) {
+static StringRef GetNthLineOfFile(llvm::Optional<llvm::MemoryBufferRef> Buffer,
+ int Line) {
if (!Buffer)
return "";
@@ -131,11 +133,11 @@ static StringRef GetNthLineOfFile(const llvm::MemoryBuffer *Buffer, int Line) {
return *LI;
}
-static std::string NormalizeLine(const SourceManager &SM, FullSourceLoc &L,
+static std::string NormalizeLine(const SourceManager &SM, const FullSourceLoc &L,
const LangOptions &LangOpts) {
static StringRef Whitespaces = " \t\n";
- StringRef Str = GetNthLineOfFile(SM.getBuffer(L.getFileID(), L),
+ StringRef Str = GetNthLineOfFile(SM.getBufferOrNone(L.getFileID(), L),
L.getExpansionLineNumber());
StringRef::size_type col = Str.find_first_not_of(Whitespaces);
if (col == StringRef::npos)
@@ -144,8 +146,8 @@ static std::string NormalizeLine(const SourceManager &SM, FullSourceLoc &L,
col++;
SourceLocation StartOfLine =
SM.translateLineCol(SM.getFileID(L), L.getExpansionLineNumber(), col);
- const llvm::MemoryBuffer *Buffer =
- SM.getBuffer(SM.getFileID(StartOfLine), StartOfLine);
+ Optional<llvm::MemoryBufferRef> Buffer =
+ SM.getBufferOrNone(SM.getFileID(StartOfLine), StartOfLine);
if (!Buffer)
return {};
@@ -167,7 +169,7 @@ static std::string NormalizeLine(const SourceManager &SM, FullSourceLoc &L,
return LineBuff.str();
}
-static llvm::SmallString<32> GetHashOfContent(StringRef Content) {
+static llvm::SmallString<32> GetMD5HashOfContent(StringRef Content) {
llvm::MD5 Hash;
llvm::MD5::MD5Result MD5Res;
SmallString<32> Res;
@@ -179,26 +181,27 @@ static llvm::SmallString<32> GetHashOfContent(StringRef Content) {
return Res;
}
-std::string clang::GetIssueString(const SourceManager &SM,
- FullSourceLoc &IssueLoc,
- StringRef CheckerName, StringRef BugType,
- const Decl *D,
+std::string clang::getIssueString(const FullSourceLoc &IssueLoc,
+ StringRef CheckerName,
+ StringRef WarningMessage,
+ const Decl *IssueDecl,
const LangOptions &LangOpts) {
static StringRef Delimiter = "$";
return (llvm::Twine(CheckerName) + Delimiter +
- GetEnclosingDeclContextSignature(D) + Delimiter +
+ GetEnclosingDeclContextSignature(IssueDecl) + Delimiter +
Twine(IssueLoc.getExpansionColumnNumber()) + Delimiter +
- NormalizeLine(SM, IssueLoc, LangOpts) + Delimiter + BugType)
+ NormalizeLine(IssueLoc.getManager(), IssueLoc, LangOpts) +
+ Delimiter + WarningMessage)
.str();
}
-SmallString<32> clang::GetIssueHash(const SourceManager &SM,
- FullSourceLoc &IssueLoc,
- StringRef CheckerName, StringRef BugType,
- const Decl *D,
+SmallString<32> clang::getIssueHash(const FullSourceLoc &IssueLoc,
+ StringRef CheckerName,
+ StringRef WarningMessage,
+ const Decl *IssueDecl,
const LangOptions &LangOpts) {
- return GetHashOfContent(
- GetIssueString(SM, IssueLoc, CheckerName, BugType, D, LangOpts));
+ return GetMD5HashOfContent(getIssueString(
+ IssueLoc, CheckerName, WarningMessage, IssueDecl, LangOpts));
}
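The hash is simply the MD5 digest of the delimited issue string. A standalone sketch of the same llvm::MD5 usage, assuming only the LLVM support headers this file already depends on:

  #include "llvm/ADT/SmallString.h"
  #include "llvm/Support/MD5.h"

  static llvm::SmallString<32> md5Of(llvm::StringRef Content) {
    llvm::MD5 Hash;
    llvm::MD5::MD5Result Result;
    Hash.update(Content); // feed the "checker$decl$col$line$msg" string
    Hash.final(Result);
    llvm::SmallString<32> Digest;
    llvm::MD5::stringifyResult(Result, Digest); // hex-encode the digest
    return Digest;
  }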
diff --git a/clang/lib/Analysis/LiveVariables.cpp b/clang/lib/Analysis/LiveVariables.cpp
index d24c40b457b4..8cdc4cc5bd61 100644
--- a/clang/lib/Analysis/LiveVariables.cpp
+++ b/clang/lib/Analysis/LiveVariables.cpp
@@ -27,7 +27,7 @@ namespace {
class LiveVariablesImpl {
public:
AnalysisDeclContext &analysisContext;
- llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
+ llvm::ImmutableSet<const Expr *>::Factory ESetFact;
llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
llvm::ImmutableSet<const BindingDecl *>::Factory BSetFact;
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
@@ -45,16 +45,15 @@ public:
LiveVariables::Observer *obs = nullptr);
void dumpBlockLiveness(const SourceManager& M);
- void dumpStmtLiveness(const SourceManager& M);
+ void dumpExprLiveness(const SourceManager& M);
LiveVariablesImpl(AnalysisDeclContext &ac, bool KillAtAssign)
- : analysisContext(ac),
- SSetFact(false), // Do not canonicalize ImmutableSets by default.
- DSetFact(false), // This is a *major* performance win.
- BSetFact(false),
- killAtAssign(KillAtAssign) {}
+ : analysisContext(ac),
+ ESetFact(false), // Do not canonicalize ImmutableSets by default.
+ DSetFact(false), // This is a *major* performance win.
+ BSetFact(false), killAtAssign(KillAtAssign) {}
};
-}
+} // namespace
static LiveVariablesImpl &getImpl(void *x) {
return *((LiveVariablesImpl *) x);
@@ -64,8 +63,8 @@ static LiveVariablesImpl &getImpl(void *x) {
// Operations and queries on LivenessValues.
//===----------------------------------------------------------------------===//
-bool LiveVariables::LivenessValues::isLive(const Stmt *S) const {
- return liveStmts.contains(S);
+bool LiveVariables::LivenessValues::isLive(const Expr *E) const {
+ return liveExprs.contains(E);
}
bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
@@ -97,10 +96,10 @@ LiveVariables::LivenessValues
LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
LiveVariables::LivenessValues valsB) {
- llvm::ImmutableSetRef<const Stmt *>
- SSetRefA(valsA.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory()),
- SSetRefB(valsB.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory());
-
+ llvm::ImmutableSetRef<const Expr *> SSetRefA(
+ valsA.liveExprs.getRootWithoutRetain(), ESetFact.getTreeFactory()),
+ SSetRefB(valsB.liveExprs.getRootWithoutRetain(),
+ ESetFact.getTreeFactory());
llvm::ImmutableSetRef<const VarDecl *>
DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()),
@@ -122,7 +121,7 @@ LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
}
bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const {
- return liveStmts == V.liveStmts && liveDecls == V.liveDecls;
+ return liveExprs == V.liveExprs && liveDecls == V.liveDecls;
}
//===----------------------------------------------------------------------===//
@@ -141,8 +140,8 @@ bool LiveVariables::isLive(const Stmt *S, const VarDecl *D) {
return isAlwaysAlive(D) || getImpl(impl).stmtsToLiveness[S].isLive(D);
}
-bool LiveVariables::isLive(const Stmt *Loc, const Stmt *S) {
- return getImpl(impl).stmtsToLiveness[Loc].isLive(S);
+bool LiveVariables::isLive(const Stmt *Loc, const Expr *Val) {
+ return getImpl(impl).stmtsToLiveness[Loc].isLive(Val);
}
//===----------------------------------------------------------------------===//
@@ -186,27 +185,27 @@ static const VariableArrayType *FindVA(QualType Ty) {
return nullptr;
}
-static const Stmt *LookThroughStmt(const Stmt *S) {
- while (S) {
- if (const Expr *Ex = dyn_cast<Expr>(S))
- S = Ex->IgnoreParens();
- if (const FullExpr *FE = dyn_cast<FullExpr>(S)) {
- S = FE->getSubExpr();
+static const Expr *LookThroughExpr(const Expr *E) {
+ while (E) {
+ if (const Expr *Ex = dyn_cast<Expr>(E))
+ E = Ex->IgnoreParens();
+ if (const FullExpr *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
continue;
}
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
- S = OVE->getSourceExpr();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ E = OVE->getSourceExpr();
continue;
}
break;
}
- return S;
+ return E;
}
-static void AddLiveStmt(llvm::ImmutableSet<const Stmt *> &Set,
- llvm::ImmutableSet<const Stmt *>::Factory &F,
- const Stmt *S) {
- Set = F.add(Set, LookThroughStmt(S));
+static void AddLiveExpr(llvm::ImmutableSet<const Expr *> &Set,
+ llvm::ImmutableSet<const Expr *>::Factory &F,
+ const Expr *E) {
+ Set = F.add(Set, LookThroughExpr(E));
}
void TransferFunctions::Visit(Stmt *S) {
@@ -215,8 +214,8 @@ void TransferFunctions::Visit(Stmt *S) {
StmtVisitor<TransferFunctions>::Visit(S);
- if (isa<Expr>(S)) {
- val.liveStmts = LV.SSetFact.remove(val.liveStmts, S);
+ if (const auto *E = dyn_cast<Expr>(S)) {
+ val.liveExprs = LV.ESetFact.remove(val.liveExprs, E);
}
// Mark all children expressions live.
@@ -233,7 +232,7 @@ void TransferFunctions::Visit(Stmt *S) {
// Include the implicit "this" pointer as being live.
CXXMemberCallExpr *CE = cast<CXXMemberCallExpr>(S);
if (Expr *ImplicitObj = CE->getImplicitObjectArgument()) {
- AddLiveStmt(val.liveStmts, LV.SSetFact, ImplicitObj);
+ AddLiveExpr(val.liveExprs, LV.ESetFact, ImplicitObj);
}
break;
}
@@ -250,7 +249,7 @@ void TransferFunctions::Visit(Stmt *S) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
for (const VariableArrayType* VA = FindVA(VD->getType());
VA != nullptr; VA = FindVA(VA->getElementType())) {
- AddLiveStmt(val.liveStmts, LV.SSetFact, VA->getSizeExpr());
+ AddLiveExpr(val.liveExprs, LV.ESetFact, VA->getSizeExpr());
}
}
break;
@@ -263,7 +262,7 @@ void TransferFunctions::Visit(Stmt *S) {
if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(child))
child = OV->getSourceExpr();
child = child->IgnoreParens();
- val.liveStmts = LV.SSetFact.add(val.liveStmts, child);
+ val.liveExprs = LV.ESetFact.add(val.liveExprs, child);
return;
}
@@ -284,36 +283,39 @@ void TransferFunctions::Visit(Stmt *S) {
// If one of the branches is an expression rather than a compound
// statement, it will be bad if we mark it as live at the terminator
// of the if-statement (i.e., immediately after the condition expression).
- AddLiveStmt(val.liveStmts, LV.SSetFact, cast<IfStmt>(S)->getCond());
+ AddLiveExpr(val.liveExprs, LV.ESetFact, cast<IfStmt>(S)->getCond());
return;
}
case Stmt::WhileStmtClass: {
// If the loop body is an expression rather than a compound statement,
// it will be bad if we mark it as live at the terminator of the loop
// (i.e., immediately after the condition expression).
- AddLiveStmt(val.liveStmts, LV.SSetFact, cast<WhileStmt>(S)->getCond());
+ AddLiveExpr(val.liveExprs, LV.ESetFact, cast<WhileStmt>(S)->getCond());
return;
}
case Stmt::DoStmtClass: {
// If the loop body is an expression rather than a compound statement,
// it will be bad if we mark it as live at the terminator of the loop
// (i.e., immediately after the condition expression).
- AddLiveStmt(val.liveStmts, LV.SSetFact, cast<DoStmt>(S)->getCond());
+ AddLiveExpr(val.liveExprs, LV.ESetFact, cast<DoStmt>(S)->getCond());
return;
}
case Stmt::ForStmtClass: {
// If the loop body is an expression rather than a compound statement,
// it will be bad if we mark it as live at the terminator of the loop
// (i.e., immediately after the condition expression).
- AddLiveStmt(val.liveStmts, LV.SSetFact, cast<ForStmt>(S)->getCond());
+ AddLiveExpr(val.liveExprs, LV.ESetFact, cast<ForStmt>(S)->getCond());
return;
}
}
+ // HACK + FIXME: What is this? One could only guess that this is an attempt to
+ // fish for live values, for example, arguments from a call expression.
+ // Maybe we could take inspiration from UninitializedVariable analysis?
for (Stmt *Child : S->children()) {
- if (Child)
- AddLiveStmt(val.liveStmts, LV.SSetFact, Child);
+ if (const auto *E = dyn_cast_or_null<Expr>(Child))
+ AddLiveExpr(val.liveExprs, LV.ESetFact, E);
}
}
@@ -416,7 +418,7 @@ VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE)
const Expr *subEx = UE->getArgumentExpr();
if (subEx->getType()->isVariableArrayType()) {
assert(subEx->isLValue());
- val.liveStmts = LV.SSetFact.add(val.liveStmts, subEx->IgnoreParens());
+ val.liveExprs = LV.ESetFact.add(val.liveExprs, subEx->IgnoreParens());
}
}
@@ -613,19 +615,19 @@ void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
llvm::errs() << "\n";
}
-void LiveVariables::dumpStmtLiveness(const SourceManager &M) {
- getImpl(impl).dumpStmtLiveness(M);
+void LiveVariables::dumpExprLiveness(const SourceManager &M) {
+ getImpl(impl).dumpExprLiveness(M);
}
-void LiveVariablesImpl::dumpStmtLiveness(const SourceManager &M) {
+void LiveVariablesImpl::dumpExprLiveness(const SourceManager &M) {
// Don't iterate over blockEndsToLiveness directly because it's not sorted.
- for (auto I : *analysisContext.getCFG()) {
+ for (const CFGBlock *B : *analysisContext.getCFG()) {
- llvm::errs() << "\n[ B" << I->getBlockID()
- << " (live statements at block exit) ]\n";
- for (auto S : blocksEndToLiveness[I].liveStmts) {
+ llvm::errs() << "\n[ B" << B->getBlockID()
+ << " (live expressions at block exit) ]\n";
+ for (const Expr *E : blocksEndToLiveness[B].liveExprs) {
llvm::errs() << "\n";
- S->dump();
+ E->dump();
}
llvm::errs() << "\n";
}
diff --git a/clang/lib/Analysis/PathDiagnostic.cpp b/clang/lib/Analysis/PathDiagnostic.cpp
index c88e6c1e1535..b42f47fb68c5 100644
--- a/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/clang/lib/Analysis/PathDiagnostic.cpp
@@ -327,6 +327,10 @@ static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
}
static bool compareCrossTUSourceLocs(FullSourceLoc XL, FullSourceLoc YL) {
+ if (XL.isInvalid() && YL.isValid())
+ return true;
+ if (XL.isValid() && YL.isInvalid())
+ return false;
std::pair<FileID, unsigned> XOffs = XL.getDecomposedLoc();
std::pair<FileID, unsigned> YOffs = YL.getDecomposedLoc();
const SourceManager &SM = XL.getManager();
@@ -349,6 +353,10 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
FullSourceLoc YL = Y.getLocation().asLocation();
if (XL != YL)
return compareCrossTUSourceLocs(XL, YL);
+ FullSourceLoc XUL = X.getUniqueingLoc().asLocation();
+ FullSourceLoc YUL = Y.getUniqueingLoc().asLocation();
+ if (XUL != YUL)
+ return compareCrossTUSourceLocs(XUL, YUL);
if (X.getBugType() != Y.getBugType())
return X.getBugType() < Y.getBugType();
if (X.getCategory() != Y.getCategory())
@@ -357,20 +365,27 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
return X.getVerboseDescription() < Y.getVerboseDescription();
if (X.getShortDescription() != Y.getShortDescription())
return X.getShortDescription() < Y.getShortDescription();
- if (X.getDeclWithIssue() != Y.getDeclWithIssue()) {
- const Decl *XD = X.getDeclWithIssue();
- if (!XD)
+ auto CompareDecls = [&XL](const Decl *D1, const Decl *D2) -> Optional<bool> {
+ if (D1 == D2)
+ return None;
+ if (!D1)
return true;
- const Decl *YD = Y.getDeclWithIssue();
- if (!YD)
+ if (!D2)
return false;
- SourceLocation XDL = XD->getLocation();
- SourceLocation YDL = YD->getLocation();
- if (XDL != YDL) {
+ SourceLocation D1L = D1->getLocation();
+ SourceLocation D2L = D2->getLocation();
+ if (D1L != D2L) {
const SourceManager &SM = XL.getManager();
- return compareCrossTUSourceLocs(FullSourceLoc(XDL, SM),
- FullSourceLoc(YDL, SM));
+ return compareCrossTUSourceLocs(FullSourceLoc(D1L, SM),
+ FullSourceLoc(D2L, SM));
}
+ return None;
+ };
+ if (auto Result = CompareDecls(X.getDeclWithIssue(), Y.getDeclWithIssue()))
+ return *Result;
+ if (XUL.isValid()) {
+ if (auto Result = CompareDecls(X.getUniqueingDecl(), Y.getUniqueingDecl()))
+ return *Result;
}
PathDiagnostic::meta_iterator XI = X.meta_begin(), XE = X.meta_end();
PathDiagnostic::meta_iterator YI = Y.meta_begin(), YE = Y.meta_end();
@@ -1068,9 +1083,9 @@ unsigned PathDiagnostic::full_size() {
//===----------------------------------------------------------------------===//
void PathDiagnosticLocation::Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(Range.getBegin().getRawEncoding());
- ID.AddInteger(Range.getEnd().getRawEncoding());
- ID.AddInteger(Loc.getRawEncoding());
+ ID.Add(Range.getBegin());
+ ID.Add(Range.getEnd());
+ ID.Add(static_cast<const SourceLocation &>(Loc));
}
void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
@@ -1080,8 +1095,8 @@ void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger((unsigned) getDisplayHint());
ArrayRef<SourceRange> Ranges = getRanges();
for (const auto &I : Ranges) {
- ID.AddInteger(I.getBegin().getRawEncoding());
- ID.AddInteger(I.getEnd().getRawEncoding());
+ ID.Add(I.getBegin());
+ ID.Add(I.getEnd());
}
}
@@ -1118,6 +1133,7 @@ void PathDiagnosticPopUpPiece::Profile(llvm::FoldingSetNodeID &ID) const {
void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(getLocation());
+ ID.Add(getUniqueingLoc());
ID.AddString(BugType);
ID.AddString(VerboseDesc);
ID.AddString(Category);
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index 1208eaf93e25..21583e92c72d 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -1266,13 +1266,29 @@ ClassifyDiagnostic(const AttrTy *A) {
}
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
- if (!CurrentMethod)
+ const threadSafety::til::SExpr *SExp = CapE.sexpr();
+ assert(SExp && "Null expressions should be ignored");
+
+ if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
+ const ValueDecl *VD = LP->clangDecl();
+ // Variables defined in a function are always inaccessible.
+ if (!VD->isDefinedOutsideFunctionOrMethod())
+ return false;
+ // For now we consider static class members to be inaccessible.
+ if (isa<CXXRecordDecl>(VD->getDeclContext()))
return false;
- if (const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
- const auto *VD = P->clangDecl();
- if (VD)
- return VD->getDeclContext() == CurrentMethod->getDeclContext();
+ // Global variables are always in scope.
+ return true;
}
+
+ // Members are in scope from methods of the same class.
+ if (const auto *P = dyn_cast<til::Project>(SExp)) {
+ if (!CurrentMethod)
+ return false;
+ const ValueDecl *VD = P->clangDecl();
+ return VD->getDeclContext() == CurrentMethod->getDeclContext();
+ }
+
return false;
}
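In other words, under the rules above (illustrative sketch with invented names; a real capability type would carry thread-safety annotations):

  class Mutex {};

  Mutex GlobalMu;           // global: always in scope

  class Widget {
    Mutex Mu;               // member: in scope from Widget's own methods
    static Mutex ClassMu;   // static member: treated as inaccessible for now
    void update() {
      Mutex LocalMu;        // function-local: never in the current scope
      (void)LocalMu;
    }
  };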
@@ -1641,8 +1657,7 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
// Otherwise the negative requirement must be propagated to the caller.
LDat = FSet.findLock(Analyzer->FactMan, Cp);
if (!LDat) {
- Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
- LK_Shared, Loc);
+ Analyzer->Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
}
return;
}
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index 1b8c55e56d47..0c5d1857cc2b 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -185,7 +185,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return CapabilityExpr(nullptr, false);
// Hack to deal with smart pointers -- strip off top-level pointer casts.
- if (const auto *CE = dyn_cast_or_null<til::Cast>(E)) {
+ if (const auto *CE = dyn_cast<til::Cast>(E)) {
if (CE->castOpcode() == til::CAST_objToPtr)
return CapabilityExpr(CE->expr(), Neg);
}
@@ -274,7 +274,7 @@ til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
const auto *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
// Function parameters require substitution and/or renaming.
- if (const auto *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
+ if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) {
unsigned I = PV->getFunctionScopeIndex();
const DeclContext *D = PV->getDeclContext();
if (Ctx && Ctx->FunArgs) {
diff --git a/clang/lib/Basic/Cuda.cpp b/clang/lib/Basic/Cuda.cpp
index 709185707bd9..144113f2d2e7 100644
--- a/clang/lib/Basic/Cuda.cpp
+++ b/clang/lib/Basic/Cuda.cpp
@@ -64,6 +64,7 @@ struct CudaArchToStringMap {
{ CudaArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn" }
CudaArchToStringMap arch_names[] = {
// clang-format off
+ {CudaArch::UNUSED, "", ""},
SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
SM(30), SM(32), SM(35), SM(37), // Kepler
SM(50), SM(52), SM(53), // Maxwell
@@ -71,20 +72,34 @@ CudaArchToStringMap arch_names[] = {
SM(70), SM(72), // Volta
SM(75), // Turing
SM(80), // Ampere
- GFX(600), // tahiti
- GFX(601), // pitcairn, verde, oland,hainan
- GFX(700), // kaveri
- GFX(701), // hawaii
- GFX(702), // 290,290x,R390,R390x
- GFX(703), // kabini mullins
- GFX(704), // bonaire
- GFX(801), // carrizo
- GFX(802), // tonga,iceland
- GFX(803), // fiji,polaris10
- GFX(810), // stoney
- GFX(900), // vega, instinct
- GFX(902), GFX(904), GFX(906), GFX(908), GFX(909),
- GFX(1010), GFX(1011), GFX(1012),
+ GFX(600), // gfx600
+ GFX(601), // gfx601
+ GFX(602), // gfx602
+ GFX(700), // gfx700
+ GFX(701), // gfx701
+ GFX(702), // gfx702
+ GFX(703), // gfx703
+ GFX(704), // gfx704
+ GFX(705), // gfx705
+ GFX(801), // gfx801
+ GFX(802), // gfx802
+ GFX(803), // gfx803
+ GFX(805), // gfx805
+ GFX(810), // gfx810
+ GFX(900), // gfx900
+ GFX(902), // gfx902
+  GFX(904),  // gfx904
+ GFX(906), // gfx906
+ GFX(908), // gfx908
+ GFX(909), // gfx909
+ GFX(90c), // gfx90c
+ GFX(1010), // gfx1010
+ GFX(1011), // gfx1011
+ GFX(1012), // gfx1012
+ GFX(1030), // gfx1030
+ GFX(1031), // gfx1031
+ GFX(1032), // gfx1032
+ GFX(1033), // gfx1033
// clang-format on
};
#undef SM
diff --git a/clang/lib/Basic/Diagnostic.cpp b/clang/lib/Basic/Diagnostic.cpp
index 661eabf9bc7c..d3b2122e9c59 100644
--- a/clang/lib/Basic/Diagnostic.cpp
+++ b/clang/lib/Basic/Diagnostic.cpp
@@ -40,8 +40,8 @@
using namespace clang;
-const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
- DiagNullabilityKind nullability) {
+const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
+ DiagNullabilityKind nullability) {
StringRef string;
switch (nullability.first) {
case NullabilityKind::NonNull:
@@ -55,14 +55,20 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
case NullabilityKind::Unspecified:
string = nullability.second ? "'null_unspecified'" : "'_Null_unspecified'";
break;
+
+ case NullabilityKind::NullableResult:
+ assert(!nullability.second &&
+ "_Nullable_result isn't supported as context-sensitive keyword");
+ string = "_Nullable_result";
+ break;
}
DB.AddString(string);
return DB;
}
-const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
- llvm::Error &&E) {
+const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
+ llvm::Error &&E) {
DB.AddString(toString(std::move(E)));
return DB;
}
@@ -265,7 +271,8 @@ void DiagnosticsEngine::DiagStateMap::dump(SourceManager &SrcMgr,
PrintedOuterHeading = true;
llvm::errs() << "File " << &File << " <FileID " << ID.getHashValue()
- << ">: " << SrcMgr.getBuffer(ID)->getBufferIdentifier();
+ << ">: " << SrcMgr.getBufferOrFake(ID).getBufferIdentifier();
+
if (F.second.Parent) {
std::pair<FileID, unsigned> Decomp =
SrcMgr.getDecomposedIncludedLoc(ID);
@@ -481,13 +488,15 @@ void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
CurDiagLoc = storedDiag.getLocation();
CurDiagID = storedDiag.getID();
- NumDiagArgs = 0;
+ DiagStorage.NumDiagArgs = 0;
- DiagRanges.clear();
- DiagRanges.append(storedDiag.range_begin(), storedDiag.range_end());
+ DiagStorage.DiagRanges.clear();
+ DiagStorage.DiagRanges.append(storedDiag.range_begin(),
+ storedDiag.range_end());
- DiagFixItHints.clear();
- DiagFixItHints.append(storedDiag.fixit_begin(), storedDiag.fixit_end());
+ DiagStorage.FixItHints.clear();
+ DiagStorage.FixItHints.append(storedDiag.fixit_begin(),
+ storedDiag.fixit_end());
assert(Client && "DiagnosticConsumer not set!");
Level DiagLevel = storedDiag.getLevel();
@@ -805,7 +814,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
/// QualTypeVals - Pass a vector of arrays so that QualType names can be
/// compared to see if more information is needed to be printed.
SmallVector<intptr_t, 2> QualTypeVals;
- SmallVector<char, 64> Tree;
+ SmallString<64> Tree;
for (unsigned i = 0, e = getNumArgs(); i < e; ++i)
if (getArgKind(i) == DiagnosticsEngine::ak_qualtype)
@@ -1140,13 +1149,13 @@ bool ForwardingDiagnosticConsumer::IncludeInDiagnosticCounts() const {
return Target.IncludeInDiagnosticCounts();
}
-PartialDiagnostic::StorageAllocator::StorageAllocator() {
+PartialDiagnostic::DiagStorageAllocator::DiagStorageAllocator() {
for (unsigned I = 0; I != NumCached; ++I)
FreeList[I] = Cached + I;
NumFreeListEntries = NumCached;
}
-PartialDiagnostic::StorageAllocator::~StorageAllocator() {
+PartialDiagnostic::DiagStorageAllocator::~DiagStorageAllocator() {
// Don't assert if we are in a CrashRecovery context, as this invariant may
// be invalidated during a crash.
assert((NumFreeListEntries == NumCached ||
diff --git a/clang/lib/Basic/DiagnosticIDs.cpp b/clang/lib/Basic/DiagnosticIDs.cpp
index 8c7e63e06301..06a8e2ed5ebd 100644
--- a/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/clang/lib/Basic/DiagnosticIDs.cpp
@@ -26,6 +26,78 @@ using namespace clang;
namespace {
+struct StaticDiagInfoRec;
+
+// Store the descriptions in a separate table to avoid pointers that need to
+// be relocated, and also decrease the amount of data needed on 64-bit
+// platforms. See "How To Write Shared Libraries" by Ulrich Drepper.
+struct StaticDiagInfoDescriptionStringTable {
+#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
+ SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ char ENUM##_desc[sizeof(DESC)];
+ // clang-format off
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticCommentKinds.inc"
+#include "clang/Basic/DiagnosticCrossTUKinds.inc"
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+#include "clang/Basic/DiagnosticRefactoringKinds.inc"
+ // clang-format on
+#undef DIAG
+};
+
+const StaticDiagInfoDescriptionStringTable StaticDiagInfoDescriptions = {
+#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
+ SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ DESC,
+// clang-format off
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticCommentKinds.inc"
+#include "clang/Basic/DiagnosticCrossTUKinds.inc"
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+#include "clang/Basic/DiagnosticRefactoringKinds.inc"
+ // clang-format on
+#undef DIAG
+};
+
+extern const StaticDiagInfoRec StaticDiagInfo[];
+
+// Stored separately from StaticDiagInfoRec to pack better. Otherwise,
+// StaticDiagInfoRec would have extra padding on 64-bit platforms.
+const uint32_t StaticDiagInfoDescriptionOffsets[] = {
+#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
+ SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ offsetof(StaticDiagInfoDescriptionStringTable, ENUM##_desc),
+// clang-format off
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticCommentKinds.inc"
+#include "clang/Basic/DiagnosticCrossTUKinds.inc"
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+#include "clang/Basic/DiagnosticRefactoringKinds.inc"
+ // clang-format on
+#undef DIAG
+};
+
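The construct above is the classic relocation-free string table: all descriptions live in one struct of fixed-size char arrays, and a parallel array of offsets replaces per-entry pointers. A reduced sketch of the same technique with two invented diagnostics:

  #include <cstddef>
  #include <cstdint>

  struct DescStringTable {
    char warn_foo_desc[sizeof("foo is deprecated")];
    char err_bar_desc[sizeof("bar requires baz")];
  };

  const DescStringTable Descs = {"foo is deprecated", "bar requires baz"};

  // Offsets instead of pointers: nothing here needs load-time relocation.
  const uint32_t DescOffsets[] = {
      offsetof(DescStringTable, warn_foo_desc),
      offsetof(DescStringTable, err_bar_desc),
  };

  const char *getDesc(unsigned Index) {
    return reinterpret_cast<const char *>(&Descs) + DescOffsets[Index];
  }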
// Diagnostic classes.
enum {
CLASS_NOTE = 0x01,
@@ -42,19 +114,22 @@ struct StaticDiagInfoRec {
unsigned SFINAE : 2;
unsigned WarnNoWerror : 1;
unsigned WarnShowInSystemHeader : 1;
+ unsigned Deferrable : 1;
unsigned Category : 6;
uint16_t OptionGroupIndex;
uint16_t DescriptionLen;
- const char *DescriptionStr;
unsigned getOptionGroupIndex() const {
return OptionGroupIndex;
}
StringRef getDescription() const {
- return StringRef(DescriptionStr, DescriptionLen);
+ size_t MyIndex = this - &StaticDiagInfo[0];
+ uint32_t StringOffset = StaticDiagInfoDescriptionOffsets[MyIndex];
+ const char* Table = reinterpret_cast<const char*>(&StaticDiagInfoDescriptions);
+ return StringRef(&Table[StringOffset], DescriptionLen);
}
diag::Flavor getFlavor() const {
@@ -92,16 +167,21 @@ VALIDATE_DIAG_SIZE(REFACTORING)
#undef VALIDATE_DIAG_SIZE
#undef STRINGIFY_NAME
-} // namespace anonymous
-
-static const StaticDiagInfoRec StaticDiagInfo[] = {
+const StaticDiagInfoRec StaticDiagInfo[] = {
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, CATEGORY) \
+ SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
{ \
- diag::ENUM, DEFAULT_SEVERITY, CLASS, DiagnosticIDs::SFINAE, NOWERROR, \
- SHOWINSYSHEADER, CATEGORY, GROUP, STR_SIZE(DESC, uint16_t), DESC \
- } \
- ,
+ diag::ENUM, \
+ DEFAULT_SEVERITY, \
+ CLASS, \
+ DiagnosticIDs::SFINAE, \
+ NOWERROR, \
+ SHOWINSYSHEADER, \
+ DEFERRABLE, \
+ CATEGORY, \
+ GROUP, \
+ STR_SIZE(DESC, uint16_t)},
+// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
#include "clang/Basic/DiagnosticDriverKinds.inc"
#include "clang/Basic/DiagnosticFrontendKinds.inc"
@@ -114,9 +194,12 @@ static const StaticDiagInfoRec StaticDiagInfo[] = {
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
+ // clang-format on
#undef DIAG
};
+} // namespace
+
static const unsigned StaticDiagInfoSize = llvm::array_lengthof(StaticDiagInfo);
/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
@@ -253,6 +336,12 @@ DiagnosticIDs::getDiagnosticSFINAEResponse(unsigned DiagID) {
return SFINAE_Report;
}
+bool DiagnosticIDs::isDeferrable(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Deferrable;
+ return false;
+}
+
/// getBuiltinDiagClass - Return the class field of the diagnostic.
///
static unsigned getBuiltinDiagClass(unsigned DiagID) {
diff --git a/clang/lib/Basic/FileEntry.cpp b/clang/lib/Basic/FileEntry.cpp
new file mode 100644
index 000000000000..5ee9bef9523e
--- /dev/null
+++ b/clang/lib/Basic/FileEntry.cpp
@@ -0,0 +1,24 @@
+//===- FileEntry.cpp - File references --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Defines the implementation of clang::FileEntry and clang::FileEntryRef.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileEntry.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+using namespace clang;
+
+FileEntry::FileEntry() : UniqueID(0, 0) {}
+
+FileEntry::~FileEntry() = default;
+
+void FileEntry::closeFile() const { File.reset(); }
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index e92e9d5911c0..6e9d5d7fb422 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -69,21 +69,22 @@ void FileManager::clearStatCache() { StatCache.reset(); }
/// Retrieve the directory that the given file name resides in.
/// Filename can point to either a real file or a virtual file.
-static llvm::ErrorOr<const DirectoryEntry *>
+static llvm::Expected<DirectoryEntryRef>
getDirectoryFromFile(FileManager &FileMgr, StringRef Filename,
bool CacheFailure) {
if (Filename.empty())
- return std::errc::no_such_file_or_directory;
+ return llvm::errorCodeToError(
+ make_error_code(std::errc::no_such_file_or_directory));
if (llvm::sys::path::is_separator(Filename[Filename.size() - 1]))
- return std::errc::is_a_directory;
+ return llvm::errorCodeToError(make_error_code(std::errc::is_a_directory));
StringRef DirName = llvm::sys::path::parent_path(Filename);
// Use the current directory if file has no path component.
if (DirName.empty())
DirName = ".";
- return FileMgr.getDirectory(DirName, CacheFailure);
+ return FileMgr.getDirectoryRef(DirName, CacheFailure);
}
/// Add all ancestors of the given path (pointing to either a file or
@@ -141,7 +142,7 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
SeenDirEntries.insert({DirName, std::errc::no_such_file_or_directory});
if (!SeenDirInsertResult.second) {
if (SeenDirInsertResult.first->second)
- return DirectoryEntryRef(&*SeenDirInsertResult.first);
+ return DirectoryEntryRef(*SeenDirInsertResult.first);
return llvm::errorCodeToError(SeenDirInsertResult.first->second.getError());
}
@@ -180,7 +181,7 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
UDE.Name = InterndDirName;
}
- return DirectoryEntryRef(&NamedDirEnt);
+ return DirectoryEntryRef(NamedDirEnt);
}
llvm::ErrorOr<const DirectoryEntry *>
@@ -212,11 +213,11 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
SeenFileInsertResult.first->second.getError());
// Construct and return and FileEntryRef, unless it's a redirect to another
// filename.
- SeenFileEntryOrRedirect Value = *SeenFileInsertResult.first->second;
- FileEntry *FE;
- if (LLVM_LIKELY(FE = Value.dyn_cast<FileEntry *>()))
- return FileEntryRef(SeenFileInsertResult.first->first(), *FE);
- return getFileRef(*Value.get<const StringRef *>(), openFile, CacheFailure);
+ FileEntryRef::MapValue Value = *SeenFileInsertResult.first->second;
+ if (LLVM_LIKELY(Value.V.is<FileEntry *>()))
+ return FileEntryRef(*SeenFileInsertResult.first);
+ return FileEntryRef(*reinterpret_cast<const FileEntryRef::MapEntry *>(
+ Value.V.get<const void *>()));
}
// We've not seen this before. Fill it in.
@@ -235,14 +236,15 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// without a 'sys' subdir will get a cached failure result.
auto DirInfoOrErr = getDirectoryFromFile(*this, Filename, CacheFailure);
if (!DirInfoOrErr) { // Directory doesn't exist, file can't exist.
+ std::error_code Err = errorToErrorCode(DirInfoOrErr.takeError());
if (CacheFailure)
- NamedFileEnt->second = DirInfoOrErr.getError();
+ NamedFileEnt->second = Err;
else
SeenFileEntries.erase(Filename);
- return llvm::errorCodeToError(DirInfoOrErr.getError());
+ return llvm::errorCodeToError(Err);
}
- const DirectoryEntry *DirInfo = *DirInfoOrErr;
+ DirectoryEntryRef DirInfo = *DirInfoOrErr;
// FIXME: Use the directory info to prune this before doing the stat syscall;
// that will reduce the number of syscalls.
@@ -268,26 +270,30 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// This occurs when one dir is symlinked to another, for example.
FileEntry &UFE = UniqueRealFiles[Status.getUniqueID()];
- NamedFileEnt->second = &UFE;
-
- // If the name returned by getStatValue is different than Filename, re-intern
- // the name.
- if (Status.getName() != Filename) {
- auto &NewNamedFileEnt =
- *SeenFileEntries.insert({Status.getName(), &UFE}).first;
- assert((*NewNamedFileEnt.second).get<FileEntry *>() == &UFE &&
+ if (Status.getName() == Filename) {
+ // The name matches. Set the FileEntry.
+ NamedFileEnt->second = FileEntryRef::MapValue(UFE, DirInfo);
+ } else {
+ // Name mismatch. We need a redirect. First grab the actual entry we want
+ // to return.
+ auto &Redirection =
+ *SeenFileEntries
+ .insert({Status.getName(), FileEntryRef::MapValue(UFE, DirInfo)})
+ .first;
+ assert(Redirection.second->V.is<FileEntry *>() &&
+ "filename redirected to a non-canonical filename?");
+ assert(Redirection.second->V.get<FileEntry *>() == &UFE &&
"filename from getStatValue() refers to wrong file");
- InterndFileName = NewNamedFileEnt.first().data();
- // In addition to re-interning the name, construct a redirecting seen file
- // entry, that will point to the name the filesystem actually wants to use.
- StringRef *Redirect = new (CanonicalNameStorage) StringRef(InterndFileName);
- auto SeenFileInsertResultIt = SeenFileEntries.find(Filename);
- assert(SeenFileInsertResultIt != SeenFileEntries.end() &&
- "unexpected SeenFileEntries cache miss");
- SeenFileInsertResultIt->second = Redirect;
- NamedFileEnt = &*SeenFileInsertResultIt;
+
+ // Cache the redirection in the previously-inserted entry, still available
+ // in the tentative return value.
+ NamedFileEnt->second = FileEntryRef::MapValue(Redirection);
+
+ // Fix the tentative return value.
+ NamedFileEnt = &Redirection;
}
+ FileEntryRef ReturnedRef(*NamedFileEnt);
if (UFE.isValid()) { // Already have an entry with this inode, return it.
// FIXME: this hack ensures that if we look up a file by a virtual path in
@@ -296,26 +302,26 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// module's structure when its headers/module map are mapped in the VFS.
// We should remove this as soon as we can properly support a file having
// multiple names.
- if (DirInfo != UFE.Dir && Status.IsVFSMapped)
- UFE.Dir = DirInfo;
+ if (&DirInfo.getDirEntry() != UFE.Dir && Status.IsVFSMapped)
+ UFE.Dir = &DirInfo.getDirEntry();
- // Always update the name to use the last name by which a file was accessed.
- // FIXME: Neither this nor always using the first name is correct; we want
- // to switch towards a design where we return a FileName object that
+ // Always update LastRef to the last name by which a file was accessed.
+ // FIXME: Neither this nor always using the first reference is correct; we
+ // want to switch towards a design where we return a FileName object that
// encapsulates both the name by which the file was accessed and the
// corresponding FileEntry.
- // FIXME: The Name should be removed from FileEntry once all clients
- // adopt FileEntryRef.
- UFE.Name = InterndFileName;
+ // FIXME: LastRef should be removed from FileEntry once all clients adopt
+ // FileEntryRef.
+ UFE.LastRef = ReturnedRef;
- return FileEntryRef(InterndFileName, UFE);
+ return ReturnedRef;
}
// Otherwise, we don't have this file yet, add it.
- UFE.Name = InterndFileName;
+ UFE.LastRef = ReturnedRef;
UFE.Size = Status.getSize();
UFE.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
- UFE.Dir = DirInfo;
+ UFE.Dir = &DirInfo.getDirEntry();
UFE.UID = NextFileUID++;
UFE.UniqueID = Status.getUniqueID();
UFE.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
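To make the redirect machinery above concrete: two spellings that stat to the same inode share one FileEntry through UniqueRealFiles but keep distinct FileEntryRef spellings. A sketch, assuming both paths resolve on disk and the header's default arguments for openFile/CacheFailure:

  #include <cassert>

  static void demoAliases(clang::FileManager &FM) {
    auto A = FM.getFileRef("./input.c");
    auto B = FM.getFileRef("input.c");
    if (A && B) {
      // One inode, one canonical FileEntry...
      assert(&A->getFileEntry() == &B->getFileEntry());
      // ...but each ref remembers the name it was looked up by.
      assert(A->getName() != B->getName());
    }
  }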
@@ -329,24 +335,46 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// We should still fill the path even if we aren't opening the file.
fillRealPathName(&UFE, InterndFileName);
}
- return FileEntryRef(InterndFileName, UFE);
+ return ReturnedRef;
+}
+
+llvm::Expected<FileEntryRef> FileManager::getSTDIN() {
+ // Only read stdin once.
+ if (STDIN)
+ return *STDIN;
+
+ std::unique_ptr<llvm::MemoryBuffer> Content;
+ if (auto ContentOrError = llvm::MemoryBuffer::getSTDIN())
+ Content = std::move(*ContentOrError);
+ else
+ return llvm::errorCodeToError(ContentOrError.getError());
+
+ STDIN = getVirtualFileRef(Content->getBufferIdentifier(),
+ Content->getBufferSize(), 0);
+ FileEntry &FE = const_cast<FileEntry &>(STDIN->getFileEntry());
+ FE.Content = std::move(Content);
+ FE.IsNamedPipe = true;
+ return *STDIN;
}
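getSTDIN() reads standard input exactly once, parks the buffer on the virtual entry's Content member, and flags the entry as a named pipe so stale-size checks are skipped. A consumption sketch (illustrative; error handling simplified):

  static void demoStdin(clang::FileManager &FM) {
    llvm::Expected<clang::FileEntryRef> In = FM.getSTDIN();
    if (!In) {
      llvm::consumeError(In.takeError());
      return;
    }
    assert(In->getFileEntry().isNamedPipe());
    // Served straight from FileEntry::Content by getBufferForFile below.
    if (auto Buf = FM.getBufferForFile(&In->getFileEntry()))
      llvm::errs() << (*Buf)->getBufferSize() << " bytes from stdin\n";
  }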
-const FileEntry *
-FileManager::getVirtualFile(StringRef Filename, off_t Size,
- time_t ModificationTime) {
+const FileEntry *FileManager::getVirtualFile(StringRef Filename, off_t Size,
+ time_t ModificationTime) {
+ return &getVirtualFileRef(Filename, Size, ModificationTime).getFileEntry();
+}
+
+FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
+ time_t ModificationTime) {
++NumFileLookups;
// See if there is already an entry in the map for an existing file.
auto &NamedFileEnt = *SeenFileEntries.insert(
{Filename, std::errc::no_such_file_or_directory}).first;
if (NamedFileEnt.second) {
- SeenFileEntryOrRedirect Value = *NamedFileEnt.second;
- FileEntry *FE;
- if (LLVM_LIKELY(FE = Value.dyn_cast<FileEntry *>()))
- return FE;
- return getVirtualFile(*Value.get<const StringRef *>(), Size,
- ModificationTime);
+ FileEntryRef::MapValue Value = *NamedFileEnt.second;
+ if (LLVM_LIKELY(Value.V.is<FileEntry *>()))
+ return FileEntryRef(NamedFileEnt);
+ return FileEntryRef(*reinterpret_cast<const FileEntryRef::MapEntry *>(
+ Value.V.get<const void *>()));
}
// We've not seen this before, or the file is cached as non-existent.
@@ -357,7 +385,8 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
// Now that all ancestors of Filename are in the cache, the
// following call is guaranteed to find the DirectoryEntry from the
// cache.
- auto DirInfo = getDirectoryFromFile(*this, Filename, /*CacheFailure=*/true);
+ auto DirInfo = expectedToOptional(
+ getDirectoryFromFile(*this, Filename, /*CacheFailure=*/true));
assert(DirInfo &&
"The directory of a virtual file should already be in the cache.");
@@ -372,7 +401,7 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
Status.getUser(), Status.getGroup(), Size,
Status.getType(), Status.getPermissions());
- NamedFileEnt.second = UFE;
+ NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
// If we had already opened this file, close it now so we don't
// leak the descriptor. We're not going to use the file
@@ -381,8 +410,11 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
UFE->closeFile();
// If we already have an entry with this inode, return it.
+ //
+ // FIXME: Surely this should add a reference by the new name, and return
+ // it instead...
if (UFE->isValid())
- return UFE;
+ return FileEntryRef(NamedFileEnt);
UFE->UniqueID = Status.getUniqueID();
UFE->IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
@@ -390,17 +422,17 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
} else {
VirtualFileEntries.push_back(std::make_unique<FileEntry>());
UFE = VirtualFileEntries.back().get();
- NamedFileEnt.second = UFE;
+ NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
}
- UFE->Name = InterndFileName;
+ UFE->LastRef = FileEntryRef(NamedFileEnt);
UFE->Size = Size;
UFE->ModTime = ModificationTime;
- UFE->Dir = *DirInfo;
+ UFE->Dir = &DirInfo->getDirEntry();
UFE->UID = NextFileUID++;
UFE->IsValid = true;
UFE->File.reset();
- return UFE;
+ return FileEntryRef(NamedFileEnt);
}
llvm::Optional<FileEntryRef> FileManager::getBypassFile(FileEntryRef VF) {
@@ -409,17 +441,30 @@ llvm::Optional<FileEntryRef> FileManager::getBypassFile(FileEntryRef VF) {
if (getStatValue(VF.getName(), Status, /*isFile=*/true, /*F=*/nullptr))
return None;
- // Fill it in from the stat.
+ if (!SeenBypassFileEntries)
+ SeenBypassFileEntries = std::make_unique<
+ llvm::StringMap<llvm::ErrorOr<FileEntryRef::MapValue>>>();
+
+ // If we've already bypassed this file, just use the existing entry.
+ auto Insertion = SeenBypassFileEntries->insert(
+ {VF.getName(), std::errc::no_such_file_or_directory});
+ if (!Insertion.second)
+ return FileEntryRef(*Insertion.first);
+
+ // Fill in the new entry from the stat.
BypassFileEntries.push_back(std::make_unique<FileEntry>());
const FileEntry &VFE = VF.getFileEntry();
FileEntry &BFE = *BypassFileEntries.back();
- BFE.Name = VFE.getName();
+ Insertion.first->second = FileEntryRef::MapValue(BFE, VF.getDir());
+ BFE.LastRef = FileEntryRef(*Insertion.first);
BFE.Size = Status.getSize();
BFE.Dir = VFE.Dir;
BFE.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
BFE.UID = NextFileUID++;
BFE.IsValid = true;
- return FileEntryRef(VF.getName(), BFE);
+
+ // Save the entry in the bypass table and return.
+ return FileEntryRef(*Insertion.first);
}
bool FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
@@ -460,10 +505,14 @@ void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
bool RequiresNullTerminator) {
+ // If the content lives on the file entry, return a reference to it.
+ if (Entry->Content)
+ return llvm::MemoryBuffer::getMemBuffer(Entry->Content->getMemBufferRef());
+
uint64_t FileSize = Entry->getSize();
// If there's a high enough chance that the file has changed since we
// got its size, force a stat before opening it.
- if (isVolatile)
+ if (isVolatile || Entry->isNamedPipe())
FileSize = -1;
StringRef Filename = Entry->getName();
@@ -534,13 +583,13 @@ void FileManager::GetUniqueIDMapping(
UIDToFiles.resize(NextFileUID);
// Map file entries
- for (llvm::StringMap<llvm::ErrorOr<SeenFileEntryOrRedirect>,
+ for (llvm::StringMap<llvm::ErrorOr<FileEntryRef::MapValue>,
llvm::BumpPtrAllocator>::const_iterator
FE = SeenFileEntries.begin(),
FEEnd = SeenFileEntries.end();
FE != FEEnd; ++FE)
- if (llvm::ErrorOr<SeenFileEntryOrRedirect> Entry = FE->getValue()) {
- if (const auto *FE = (*Entry).dyn_cast<FileEntry *>())
+ if (llvm::ErrorOr<FileEntryRef::MapValue> Entry = FE->getValue()) {
+ if (const auto *FE = Entry->V.dyn_cast<FileEntry *>())
UIDToFiles[FE->getUID()] = FE;
}
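Taken together, getVirtualFileRef and getBypassFile let a client register an in-memory file yet still reach the on-disk version when one exists. A sketch with illustrative names:

  static void demoVirtual(clang::FileManager &FM, llvm::StringRef Contents) {
    // Register a file that need not exist on disk.
    clang::FileEntryRef VF = FM.getVirtualFileRef(
        "remapped.c", /*Size=*/Contents.size(), /*ModificationTime=*/0);
    // If a real "remapped.c" exists, the bypass entry reflects its on-disk
    // stat data instead of the virtual size and mtime.
    if (llvm::Optional<clang::FileEntryRef> BP = FM.getBypassFile(VF))
      (void)BP->getFileEntry().getSize();
  }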
diff --git a/clang/lib/Basic/FixedPoint.cpp b/clang/lib/Basic/FixedPoint.cpp
deleted file mode 100644
index ed8b92c98fdb..000000000000
--- a/clang/lib/Basic/FixedPoint.cpp
+++ /dev/null
@@ -1,394 +0,0 @@
-//===- FixedPoint.cpp - Fixed point constant handling -----------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// Defines the implementation for the fixed point number interface.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/FixedPoint.h"
-
-namespace clang {
-
-APFixedPoint APFixedPoint::convert(const FixedPointSemantics &DstSema,
- bool *Overflow) const {
- llvm::APSInt NewVal = Val;
- unsigned DstWidth = DstSema.getWidth();
- unsigned DstScale = DstSema.getScale();
- bool Upscaling = DstScale > getScale();
- if (Overflow)
- *Overflow = false;
-
- if (Upscaling) {
- NewVal = NewVal.extend(NewVal.getBitWidth() + DstScale - getScale());
- NewVal <<= (DstScale - getScale());
- } else {
- NewVal >>= (getScale() - DstScale);
- }
-
- auto Mask = llvm::APInt::getBitsSetFrom(
- NewVal.getBitWidth(),
- std::min(DstScale + DstSema.getIntegralBits(), NewVal.getBitWidth()));
- llvm::APInt Masked(NewVal & Mask);
-
- // Change in the bits above the sign
- if (!(Masked == Mask || Masked == 0)) {
- // Found overflow in the bits above the sign
- if (DstSema.isSaturated())
- NewVal = NewVal.isNegative() ? Mask : ~Mask;
- else if (Overflow)
- *Overflow = true;
- }
-
- // If the dst semantics are unsigned, but our value is signed and negative, we
- // clamp to zero.
- if (!DstSema.isSigned() && NewVal.isSigned() && NewVal.isNegative()) {
- // Found negative overflow for unsigned result
- if (DstSema.isSaturated())
- NewVal = 0;
- else if (Overflow)
- *Overflow = true;
- }
-
- NewVal = NewVal.extOrTrunc(DstWidth);
- NewVal.setIsSigned(DstSema.isSigned());
- return APFixedPoint(NewVal, DstSema);
-}
-
-int APFixedPoint::compare(const APFixedPoint &Other) const {
- llvm::APSInt ThisVal = getValue();
- llvm::APSInt OtherVal = Other.getValue();
- bool ThisSigned = Val.isSigned();
- bool OtherSigned = OtherVal.isSigned();
- unsigned OtherScale = Other.getScale();
- unsigned OtherWidth = OtherVal.getBitWidth();
-
- unsigned CommonWidth = std::max(Val.getBitWidth(), OtherWidth);
-
- // Prevent overflow in the event the widths are the same but the scales differ
- CommonWidth += getScale() >= OtherScale ? getScale() - OtherScale
- : OtherScale - getScale();
-
- ThisVal = ThisVal.extOrTrunc(CommonWidth);
- OtherVal = OtherVal.extOrTrunc(CommonWidth);
-
- unsigned CommonScale = std::max(getScale(), OtherScale);
- ThisVal = ThisVal.shl(CommonScale - getScale());
- OtherVal = OtherVal.shl(CommonScale - OtherScale);
-
- if (ThisSigned && OtherSigned) {
- if (ThisVal.sgt(OtherVal))
- return 1;
- else if (ThisVal.slt(OtherVal))
- return -1;
- } else if (!ThisSigned && !OtherSigned) {
- if (ThisVal.ugt(OtherVal))
- return 1;
- else if (ThisVal.ult(OtherVal))
- return -1;
- } else if (ThisSigned && !OtherSigned) {
- if (ThisVal.isSignBitSet())
- return -1;
- else if (ThisVal.ugt(OtherVal))
- return 1;
- else if (ThisVal.ult(OtherVal))
- return -1;
- } else {
- // !ThisSigned && OtherSigned
- if (OtherVal.isSignBitSet())
- return 1;
- else if (ThisVal.ugt(OtherVal))
- return 1;
- else if (ThisVal.ult(OtherVal))
- return -1;
- }
-
- return 0;
-}
-
-APFixedPoint APFixedPoint::getMax(const FixedPointSemantics &Sema) {
- bool IsUnsigned = !Sema.isSigned();
- auto Val = llvm::APSInt::getMaxValue(Sema.getWidth(), IsUnsigned);
- if (IsUnsigned && Sema.hasUnsignedPadding())
- Val = Val.lshr(1);
- return APFixedPoint(Val, Sema);
-}
-
-APFixedPoint APFixedPoint::getMin(const FixedPointSemantics &Sema) {
- auto Val = llvm::APSInt::getMinValue(Sema.getWidth(), !Sema.isSigned());
- return APFixedPoint(Val, Sema);
-}
-
-FixedPointSemantics FixedPointSemantics::getCommonSemantics(
- const FixedPointSemantics &Other) const {
- unsigned CommonScale = std::max(getScale(), Other.getScale());
- unsigned CommonWidth =
- std::max(getIntegralBits(), Other.getIntegralBits()) + CommonScale;
-
- bool ResultIsSigned = isSigned() || Other.isSigned();
- bool ResultIsSaturated = isSaturated() || Other.isSaturated();
- bool ResultHasUnsignedPadding = false;
- if (!ResultIsSigned) {
- // Both are unsigned.
- ResultHasUnsignedPadding = hasUnsignedPadding() &&
- Other.hasUnsignedPadding() && !ResultIsSaturated;
- }
-
- // If the result is signed, add an extra bit for the sign. Otherwise, if it is
- // unsigned and has unsigned padding, we only need to add the extra padding
- // bit back if we are not saturating.
- if (ResultIsSigned || ResultHasUnsignedPadding)
- CommonWidth++;
-
- return FixedPointSemantics(CommonWidth, CommonScale, ResultIsSigned,
- ResultIsSaturated, ResultHasUnsignedPadding);
-}
-
-APFixedPoint APFixedPoint::add(const APFixedPoint &Other,
- bool *Overflow) const {
- auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
- APFixedPoint ConvertedThis = convert(CommonFXSema);
- APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
- llvm::APSInt ThisVal = ConvertedThis.getValue();
- llvm::APSInt OtherVal = ConvertedOther.getValue();
- bool Overflowed = false;
-
- llvm::APSInt Result;
- if (CommonFXSema.isSaturated()) {
- Result = CommonFXSema.isSigned() ? ThisVal.sadd_sat(OtherVal)
- : ThisVal.uadd_sat(OtherVal);
- } else {
- Result = ThisVal.isSigned() ? ThisVal.sadd_ov(OtherVal, Overflowed)
- : ThisVal.uadd_ov(OtherVal, Overflowed);
- }
-
- if (Overflow)
- *Overflow = Overflowed;
-
- return APFixedPoint(Result, CommonFXSema);
-}
-
-APFixedPoint APFixedPoint::sub(const APFixedPoint &Other,
- bool *Overflow) const {
- auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
- APFixedPoint ConvertedThis = convert(CommonFXSema);
- APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
- llvm::APSInt ThisVal = ConvertedThis.getValue();
- llvm::APSInt OtherVal = ConvertedOther.getValue();
- bool Overflowed = false;
-
- llvm::APSInt Result;
- if (CommonFXSema.isSaturated()) {
- Result = CommonFXSema.isSigned() ? ThisVal.ssub_sat(OtherVal)
- : ThisVal.usub_sat(OtherVal);
- } else {
- Result = ThisVal.isSigned() ? ThisVal.ssub_ov(OtherVal, Overflowed)
- : ThisVal.usub_ov(OtherVal, Overflowed);
- }
-
- if (Overflow)
- *Overflow = Overflowed;
-
- return APFixedPoint(Result, CommonFXSema);
-}
-
-APFixedPoint APFixedPoint::mul(const APFixedPoint &Other,
- bool *Overflow) const {
- auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
- APFixedPoint ConvertedThis = convert(CommonFXSema);
- APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
- llvm::APSInt ThisVal = ConvertedThis.getValue();
- llvm::APSInt OtherVal = ConvertedOther.getValue();
- bool Overflowed = false;
-
- // Widen the LHS and RHS so we can perform a full multiplication.
- unsigned Wide = CommonFXSema.getWidth() * 2;
- if (CommonFXSema.isSigned()) {
- ThisVal = ThisVal.sextOrSelf(Wide);
- OtherVal = OtherVal.sextOrSelf(Wide);
- } else {
- ThisVal = ThisVal.zextOrSelf(Wide);
- OtherVal = OtherVal.zextOrSelf(Wide);
- }
-
- // Perform the full multiplication and downscale to get the same scale.
- //
- // Note that the right shifts here perform an implicit downwards rounding.
- // This rounding could discard bits that would technically place the result
- // outside the representable range. We interpret the spec as allowing us to
- // perform the rounding step first, avoiding the overflow case that would
- // arise.
- llvm::APSInt Result;
- if (CommonFXSema.isSigned())
- Result = ThisVal.smul_ov(OtherVal, Overflowed)
- .ashr(CommonFXSema.getScale());
- else
- Result = ThisVal.umul_ov(OtherVal, Overflowed)
- .lshr(CommonFXSema.getScale());
- assert(!Overflowed && "Full multiplication cannot overflow!");
- Result.setIsSigned(CommonFXSema.isSigned());
-
- // If our result lies outside of the representable range of the common
- // semantic, we either have overflow or saturation.
- llvm::APSInt Max = APFixedPoint::getMax(CommonFXSema).getValue()
- .extOrTrunc(Wide);
- llvm::APSInt Min = APFixedPoint::getMin(CommonFXSema).getValue()
- .extOrTrunc(Wide);
- if (CommonFXSema.isSaturated()) {
- if (Result < Min)
- Result = Min;
- else if (Result > Max)
- Result = Max;
- } else
- Overflowed = Result < Min || Result > Max;
-
- if (Overflow)
- *Overflow = Overflowed;
-
- return APFixedPoint(Result.sextOrTrunc(CommonFXSema.getWidth()),
- CommonFXSema);
-}
-
-APFixedPoint APFixedPoint::div(const APFixedPoint &Other,
- bool *Overflow) const {
- auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
- APFixedPoint ConvertedThis = convert(CommonFXSema);
- APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
- llvm::APSInt ThisVal = ConvertedThis.getValue();
- llvm::APSInt OtherVal = ConvertedOther.getValue();
- bool Overflowed = false;
-
- // Widen the LHS and RHS so we can perform a full division.
- unsigned Wide = CommonFXSema.getWidth() * 2;
- if (CommonFXSema.isSigned()) {
- ThisVal = ThisVal.sextOrSelf(Wide);
- OtherVal = OtherVal.sextOrSelf(Wide);
- } else {
- ThisVal = ThisVal.zextOrSelf(Wide);
- OtherVal = OtherVal.zextOrSelf(Wide);
- }
-
- // Upscale to compensate for the loss of precision from division, and
- // perform the full division.
- ThisVal = ThisVal.shl(CommonFXSema.getScale());
- llvm::APSInt Result;
- if (CommonFXSema.isSigned()) {
- llvm::APInt Rem;
- llvm::APInt::sdivrem(ThisVal, OtherVal, Result, Rem);
- // If the quotient is negative and the remainder is nonzero, round
- // towards negative infinity by subtracting epsilon from the result.
- if (ThisVal.isNegative() != OtherVal.isNegative() && !Rem.isNullValue())
- Result = Result - 1;
- } else
- Result = ThisVal.udiv(OtherVal);
- Result.setIsSigned(CommonFXSema.isSigned());
-
- // If our result lies outside of the representable range of the common
- // semantic, we either have overflow or saturation.
- llvm::APSInt Max = APFixedPoint::getMax(CommonFXSema).getValue()
- .extOrTrunc(Wide);
- llvm::APSInt Min = APFixedPoint::getMin(CommonFXSema).getValue()
- .extOrTrunc(Wide);
- if (CommonFXSema.isSaturated()) {
- if (Result < Min)
- Result = Min;
- else if (Result > Max)
- Result = Max;
- } else
- Overflowed = Result < Min || Result > Max;
-
- if (Overflow)
- *Overflow = Overflowed;
-
- return APFixedPoint(Result.sextOrTrunc(CommonFXSema.getWidth()),
- CommonFXSema);
-}
-
-void APFixedPoint::toString(llvm::SmallVectorImpl<char> &Str) const {
- llvm::APSInt Val = getValue();
- unsigned Scale = getScale();
-
- if (Val.isSigned() && Val.isNegative() && Val != -Val) {
- Val = -Val;
- Str.push_back('-');
- }
-
- llvm::APSInt IntPart = Val >> Scale;
-
- // Add 4 digits to hold the value after multiplying by 10 (the radix)
- unsigned Width = Val.getBitWidth() + 4;
- llvm::APInt FractPart = Val.zextOrTrunc(Scale).zext(Width);
- llvm::APInt FractPartMask = llvm::APInt::getAllOnesValue(Scale).zext(Width);
- llvm::APInt RadixInt = llvm::APInt(Width, 10);
-
- IntPart.toString(Str, /*Radix=*/10);
- Str.push_back('.');
- do {
- (FractPart * RadixInt)
- .lshr(Scale)
- .toString(Str, /*Radix=*/10, Val.isSigned());
- FractPart = (FractPart * RadixInt) & FractPartMask;
- } while (FractPart != 0);
-}
-
-APFixedPoint APFixedPoint::negate(bool *Overflow) const {
- if (!isSaturated()) {
- if (Overflow)
- *Overflow =
- (!isSigned() && Val != 0) || (isSigned() && Val.isMinSignedValue());
- return APFixedPoint(-Val, Sema);
- }
-
- // We never overflow for saturation
- if (Overflow)
- *Overflow = false;
-
- if (isSigned())
- return Val.isMinSignedValue() ? getMax(Sema) : APFixedPoint(-Val, Sema);
- else
- return APFixedPoint(Sema);
-}
-
-llvm::APSInt APFixedPoint::convertToInt(unsigned DstWidth, bool DstSign,
- bool *Overflow) const {
- llvm::APSInt Result = getIntPart();
- unsigned SrcWidth = getWidth();
-
- llvm::APSInt DstMin = llvm::APSInt::getMinValue(DstWidth, !DstSign);
- llvm::APSInt DstMax = llvm::APSInt::getMaxValue(DstWidth, !DstSign);
-
- if (SrcWidth < DstWidth) {
- Result = Result.extend(DstWidth);
- } else if (SrcWidth > DstWidth) {
- DstMin = DstMin.extend(SrcWidth);
- DstMax = DstMax.extend(SrcWidth);
- }
-
- if (Overflow) {
- if (Result.isSigned() && !DstSign) {
- *Overflow = Result.isNegative() || Result.ugt(DstMax);
- } else if (Result.isUnsigned() && DstSign) {
- *Overflow = Result.ugt(DstMax);
- } else {
- *Overflow = Result < DstMin || Result > DstMax;
- }
- }
-
- Result.setIsSigned(DstSign);
- return Result.extOrTrunc(DstWidth);
-}
-
-APFixedPoint APFixedPoint::getFromIntValue(const llvm::APSInt &Value,
- const FixedPointSemantics &DstFXSema,
- bool *Overflow) {
- FixedPointSemantics IntFXSema = FixedPointSemantics::GetIntegerSemantics(
- Value.getBitWidth(), Value.isSigned());
- return APFixedPoint(Value, IntFXSema).convert(DstFXSema, Overflow);
-}
-
-} // namespace clang
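The block above is a relocation rather than a removal: the fixed-point arithmetic moved out of clang/Basic into LLVM's APFixedPoint support, semantics intact. A worked instance of getCommonSemantics as deleted above, assuming getIntegralBits() excludes the sign bit: combining A = (width 8, scale 7, signed) with B = (width 16, scale 8, unsigned, no padding):

  IntegralBits(A) = 8 - 7 - 1 (sign bit)     = 0
  IntegralBits(B) = 16 - 8                   = 8
  CommonScale     = max(7, 8)                = 8
  CommonWidth     = max(0, 8) + CommonScale  = 16
  A is signed, so the result is signed and gains a sign bit  => width 17

The common semantics are therefore (width 17, scale 8, signed, unsaturated, no padding): wide enough to hold any value of either operand exactly.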
diff --git a/clang/lib/Basic/IdentifierTable.cpp b/clang/lib/Basic/IdentifierTable.cpp
index 36b26d9b7c68..51c6e02e2e2e 100644
--- a/clang/lib/Basic/IdentifierTable.cpp
+++ b/clang/lib/Basic/IdentifierTable.cpp
@@ -714,6 +714,11 @@ StringRef clang::getNullabilitySpelling(NullabilityKind kind,
case NullabilityKind::Nullable:
return isContextSensitive ? "nullable" : "_Nullable";
+ case NullabilityKind::NullableResult:
+ assert(!isContextSensitive &&
+ "_Nullable_result isn't supported as context-sensitive keyword");
+ return "_Nullable_result";
+
case NullabilityKind::Unspecified:
return isContextSensitive ? "null_unspecified" : "_Null_unspecified";
}
diff --git a/clang/lib/Basic/LangOptions.cpp b/clang/lib/Basic/LangOptions.cpp
index c08670c87fb6..ed275ade4001 100644
--- a/clang/lib/Basic/LangOptions.cpp
+++ b/clang/lib/Basic/LangOptions.cpp
@@ -14,7 +14,7 @@
using namespace clang;
-LangOptions::LangOptions() {
+LangOptions::LangOptions() : LangStd(LangStandard::lang_unspecified) {
#define LANGOPT(Name, Bits, Default, Description) Name = Default;
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) set##Name(Default);
#include "clang/Basic/LangOptions.def"
diff --git a/clang/lib/Basic/Module.cpp b/clang/lib/Basic/Module.cpp
index b3daaa3a4442..2dd53b05d442 100644
--- a/clang/lib/Basic/Module.cpp
+++ b/clang/lib/Basic/Module.cpp
@@ -44,7 +44,7 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
InferSubmodules(false), InferExplicitSubmodules(false),
InferExportWildcard(false), ConfigMacrosExhaustive(false),
NoUndeclaredIncludes(false), ModuleMapIsPrivate(false),
- HasUmbrellaDir(false), NameVisibility(Hidden) {
+ NameVisibility(Hidden) {
if (Parent) {
IsAvailable = Parent->isAvailable();
IsUnimportable = Parent->isUnimportable();
@@ -75,7 +75,7 @@ static bool isPlatformEnvironment(const TargetInfo &Target, StringRef Feature) {
return true;
auto CmpPlatformEnv = [](StringRef LHS, StringRef RHS) {
- auto Pos = LHS.find("-");
+ auto Pos = LHS.find('-');
if (Pos == StringRef::npos)
return false;
SmallString<128> NewLHS = LHS.slice(0, Pos);
@@ -173,14 +173,10 @@ bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
}
bool Module::isSubModuleOf(const Module *Other) const {
- const Module *This = this;
- do {
- if (This == Other)
+ for (auto *Parent = this; Parent; Parent = Parent->Parent) {
+ if (Parent == Other)
return true;
-
- This = This->Parent;
- } while (This);
-
+ }
return false;
}
@@ -251,7 +247,10 @@ Module::DirectoryName Module::getUmbrellaDir() const {
if (Header U = getUmbrellaHeader())
return {"", U.Entry->getDir()};
- return {UmbrellaAsWritten, static_cast<const DirectoryEntry *>(Umbrella)};
+ if (auto *ME = Umbrella.dyn_cast<const DirectoryEntryRef::MapEntry *>())
+ return {UmbrellaAsWritten, DirectoryEntryRef(*ME)};
+
+ return {"", None};
}
void Module::addTopHeader(const FileEntry *File) {
@@ -675,7 +674,7 @@ ASTSourceDescriptor::ASTSourceDescriptor(Module &M)
: Signature(M.Signature), ClangModule(&M) {
if (M.Directory)
Path = M.Directory->getName();
- if (auto *File = M.getASTFile())
+ if (auto File = M.getASTFile())
ASTFile = File->getName();
}
diff --git a/clang/lib/Basic/OpenCLOptions.cpp b/clang/lib/Basic/OpenCLOptions.cpp
new file mode 100644
index 000000000000..266acc5fe477
--- /dev/null
+++ b/clang/lib/Basic/OpenCLOptions.cpp
@@ -0,0 +1,106 @@
+//===--- OpenCLOptions.cpp --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/OpenCLOptions.h"
+
+namespace clang {
+
+bool OpenCLOptions::isKnown(llvm::StringRef Ext) const {
+ return OptMap.find(Ext) != OptMap.end();
+}
+
+bool OpenCLOptions::isEnabled(llvm::StringRef Ext) const {
+ auto E = OptMap.find(Ext);
+ return E != OptMap.end() && E->second.Enabled;
+}
+
+bool OpenCLOptions::isSupported(llvm::StringRef Ext,
+ const LangOptions &LO) const {
+ auto E = OptMap.find(Ext);
+ if (E == OptMap.end()) {
+ return false;
+ }
+ const auto &I = E->getValue();
+ return I.Supported && I.isAvailableIn(LO);
+}
+
+bool OpenCLOptions::isSupportedCore(llvm::StringRef Ext,
+ const LangOptions &LO) const {
+ auto E = OptMap.find(Ext);
+ if (E == OptMap.end()) {
+ return false;
+ }
+ const auto &I = E->getValue();
+ return I.Supported && I.isCoreIn(LO);
+}
+
+bool OpenCLOptions::isSupportedOptionalCore(llvm::StringRef Ext,
+ const LangOptions &LO) const {
+ auto E = OptMap.find(Ext);
+ if (E == OptMap.end()) {
+ return false;
+ }
+ const auto &I = E->getValue();
+ return I.Supported && I.isOptionalCoreIn(LO);
+}
+
+bool OpenCLOptions::isSupportedCoreOrOptionalCore(llvm::StringRef Ext,
+ const LangOptions &LO) const {
+ return isSupportedCore(Ext, LO) || isSupportedOptionalCore(Ext, LO);
+}
+
+bool OpenCLOptions::isSupportedExtension(llvm::StringRef Ext,
+ const LangOptions &LO) const {
+ auto E = OptMap.find(Ext);
+ if (E == OptMap.end()) {
+ return false;
+ }
+ const auto &I = E->getValue();
+ return I.Supported && I.isAvailableIn(LO) &&
+ !isSupportedCoreOrOptionalCore(Ext, LO);
+}
+
+void OpenCLOptions::enable(llvm::StringRef Ext, bool V) {
+ OptMap[Ext].Enabled = V;
+}
+
+void OpenCLOptions::support(llvm::StringRef Ext, bool V) {
+ assert(!Ext.empty() && "Extension is empty.");
+ assert(Ext[0] != '+' && Ext[0] != '-');
+ OptMap[Ext].Supported = V;
+}
+
+OpenCLOptions::OpenCLOptions() {
+#define OPENCL_GENERIC_EXTENSION(Ext, AvailVer, CoreVer, OptVer) \
+ OptMap[#Ext].Avail = AvailVer; \
+ OptMap[#Ext].Core = CoreVer; \
+ OptMap[#Ext].Opt = OptVer;
+#include "clang/Basic/OpenCLExtensions.def"
+}
+
+void OpenCLOptions::addSupport(const llvm::StringMap<bool> &FeaturesMap,
+ const LangOptions &Opts) {
+ for (const auto &F : FeaturesMap) {
+ const auto &Name = F.getKey();
+ if (F.getValue() && isKnown(Name) && OptMap[Name].isAvailableIn(Opts))
+ support(Name);
+ }
+}
+
+void OpenCLOptions::disableAll() {
+ for (auto &Opt : OptMap)
+ Opt.getValue().Enabled = false;
+}
+
+void OpenCLOptions::enableSupportedCore(const LangOptions &LO) {
+ for (auto &Opt : OptMap)
+ if (isSupportedCoreOrOptionalCore(Opt.getKey(), LO))
+ Opt.getValue().Enabled = true;
+}
+
+} // end namespace clang
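A driver-side usage sketch for the new interface (the extension name is real; the flow is illustrative, and support()/enable() are assumed to default their flag to true in the header):

  #include "clang/Basic/OpenCLOptions.h"

  static void demoOpenCL(const clang::LangOptions &LO) {
    clang::OpenCLOptions Opts;           // seeded from OpenCLExtensions.def
    Opts.support("cl_khr_fp64");         // the target claims support
    if (Opts.isSupportedExtension("cl_khr_fp64", LO))
      Opts.enable("cl_khr_fp64");        // as if by #pragma OPENCL EXTENSION
    Opts.enableSupportedCore(LO);        // core features default to enabled
  }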
diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp
index cae61ad4f2e3..5c19d60cbd6e 100644
--- a/clang/lib/Basic/OpenMPKinds.cpp
+++ b/clang/lib/Basic/OpenMPKinds.cpp
@@ -20,8 +20,8 @@
using namespace clang;
using namespace llvm::omp;
-unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
- StringRef Str) {
+unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
+ unsigned OpenMPVersion) {
switch (Kind) {
case OMPC_default:
return llvm::StringSwitch<unsigned>(Str)
@@ -51,26 +51,29 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
#define OPENMP_LINEAR_KIND(Name) .Case(#Name, OMPC_LINEAR_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_LINEAR_unknown);
- case OMPC_map:
- return llvm::StringSwitch<unsigned>(Str)
+ case OMPC_map: {
+ unsigned Type = llvm::StringSwitch<unsigned>(Str)
#define OPENMP_MAP_KIND(Name) \
.Case(#Name, static_cast<unsigned>(OMPC_MAP_##Name))
#define OPENMP_MAP_MODIFIER_KIND(Name) \
.Case(#Name, static_cast<unsigned>(OMPC_MAP_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_MAP_unknown);
+ if (OpenMPVersion < 51 && Type == OMPC_MAP_MODIFIER_present)
+ return OMPC_MAP_MODIFIER_unknown;
+ return Type;
+ }
case OMPC_to:
- return llvm::StringSwitch<unsigned>(Str)
-#define OPENMP_TO_MODIFIER_KIND(Name) \
- .Case(#Name, static_cast<unsigned>(OMPC_TO_MODIFIER_##Name))
+ case OMPC_from: {
+ unsigned Type = llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_MOTION_MODIFIER_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_MOTION_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
- .Default(OMPC_TO_MODIFIER_unknown);
- case OMPC_from:
- return llvm::StringSwitch<unsigned>(Str)
-#define OPENMP_FROM_MODIFIER_KIND(Name) \
- .Case(#Name, static_cast<unsigned>(OMPC_FROM_MODIFIER_##Name))
-#include "clang/Basic/OpenMPKinds.def"
- .Default(OMPC_FROM_MODIFIER_unknown);
+ .Default(OMPC_MOTION_MODIFIER_unknown);
+ if (OpenMPVersion < 51 && Type == OMPC_MOTION_MODIFIER_present)
+ return OMPC_MOTION_MODIFIER_unknown;
+ return Type;
+ }
case OMPC_dist_schedule:
return llvm::StringSwitch<OpenMPDistScheduleClauseKind>(Str)
#define OPENMP_DIST_SCHEDULE_KIND(Name) .Case(#Name, OMPC_DIST_SCHEDULE_##Name)
@@ -254,29 +257,18 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
}
llvm_unreachable("Invalid OpenMP 'map' clause type");
case OMPC_to:
- switch (Type) {
- case OMPC_TO_MODIFIER_unknown:
- return "unknown";
-#define OPENMP_TO_MODIFIER_KIND(Name) \
- case OMPC_TO_MODIFIER_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- llvm_unreachable("Invalid OpenMP 'to' clause type");
case OMPC_from:
switch (Type) {
- case OMPC_FROM_MODIFIER_unknown:
+ case OMPC_MOTION_MODIFIER_unknown:
return "unknown";
-#define OPENMP_FROM_MODIFIER_KIND(Name) \
- case OMPC_FROM_MODIFIER_##Name: \
+#define OPENMP_MOTION_MODIFIER_KIND(Name) \
+ case OMPC_MOTION_MODIFIER_##Name: \
return #Name;
#include "clang/Basic/OpenMPKinds.def"
default:
break;
}
- llvm_unreachable("Invalid OpenMP 'from' clause type");
+ llvm_unreachable("Invalid OpenMP 'to' or 'from' clause type");
case OMPC_dist_schedule:
switch (Type) {
case OMPC_DIST_SCHEDULE_unknown:
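The effect of the new OpenMPVersion parameter in one place, sketched: the 'present' map-type modifier only parses from OpenMP 5.1 onward.

  #include "clang/Basic/OpenMPKinds.h"

  static bool parsesPresentMapModifier(unsigned OpenMPVersion) {
    unsigned T = clang::getOpenMPSimpleClauseType(llvm::omp::OMPC_map,
                                                  "present", OpenMPVersion);
    return T == clang::OMPC_MAP_MODIFIER_present;
  }
  // parsesPresentMapModifier(50) -> false (folds to OMPC_MAP_MODIFIER_unknown)
  // parsesPresentMapModifier(51) -> true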
diff --git a/clang/lib/Basic/ProfileList.cpp b/clang/lib/Basic/ProfileList.cpp
new file mode 100644
index 000000000000..56bc37a79301
--- /dev/null
+++ b/clang/lib/Basic/ProfileList.cpp
@@ -0,0 +1,113 @@
+//===--- ProfileList.cpp - ProfileList filter -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// User-provided filters that include or exclude profile instrumentation for
+// certain functions or files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/ProfileList.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/SpecialCaseList.h"
+
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace clang {
+
+class ProfileSpecialCaseList : public llvm::SpecialCaseList {
+public:
+ static std::unique_ptr<ProfileSpecialCaseList>
+ create(const std::vector<std::string> &Paths, llvm::vfs::FileSystem &VFS,
+ std::string &Error);
+
+ static std::unique_ptr<ProfileSpecialCaseList>
+ createOrDie(const std::vector<std::string> &Paths,
+ llvm::vfs::FileSystem &VFS);
+
+ bool isEmpty() const { return Sections.empty(); }
+
+ bool hasPrefix(StringRef Prefix) const {
+ for (auto &SectionIter : Sections)
+ if (SectionIter.Entries.count(Prefix) > 0)
+ return true;
+ return false;
+ }
+};
+
+std::unique_ptr<ProfileSpecialCaseList>
+ProfileSpecialCaseList::create(const std::vector<std::string> &Paths,
+ llvm::vfs::FileSystem &VFS,
+ std::string &Error) {
+ auto PSCL = std::make_unique<ProfileSpecialCaseList>();
+ if (PSCL->createInternal(Paths, VFS, Error))
+ return PSCL;
+ return nullptr;
+}
+
+std::unique_ptr<ProfileSpecialCaseList>
+ProfileSpecialCaseList::createOrDie(const std::vector<std::string> &Paths,
+ llvm::vfs::FileSystem &VFS) {
+ std::string Error;
+ if (auto PSCL = create(Paths, VFS, Error))
+ return PSCL;
+ llvm::report_fatal_error(Error);
+}
+
+} // namespace clang
+
+ProfileList::ProfileList(ArrayRef<std::string> Paths, SourceManager &SM)
+ : SCL(ProfileSpecialCaseList::createOrDie(
+ Paths, SM.getFileManager().getVirtualFileSystem())),
+ Empty(SCL->isEmpty()),
+ Default(SCL->hasPrefix("fun") || SCL->hasPrefix("src")), SM(SM) {}
+
+ProfileList::~ProfileList() = default;
+
+static StringRef getSectionName(CodeGenOptions::ProfileInstrKind Kind) {
+ switch (Kind) {
+ case CodeGenOptions::ProfileNone:
+ return "";
+ case CodeGenOptions::ProfileClangInstr:
+ return "clang";
+ case CodeGenOptions::ProfileIRInstr:
+ return "llvm";
+ case CodeGenOptions::ProfileCSIRInstr:
+ return "csllvm";
+ }
+ llvm_unreachable("invalid profile instrumentation kind");
+}
+
+llvm::Optional<bool>
+ProfileList::isFunctionExcluded(StringRef FunctionName,
+ CodeGenOptions::ProfileInstrKind Kind) const {
+ StringRef Section = getSectionName(Kind);
+ if (SCL->inSection(Section, "!fun", FunctionName))
+ return true;
+ if (SCL->inSection(Section, "fun", FunctionName))
+ return false;
+ return None;
+}
+
+llvm::Optional<bool>
+ProfileList::isLocationExcluded(SourceLocation Loc,
+ CodeGenOptions::ProfileInstrKind Kind) const {
+ return isFileExcluded(SM.getFilename(SM.getFileLoc(Loc)), Kind);
+}
+
+llvm::Optional<bool>
+ProfileList::isFileExcluded(StringRef FileName,
+ CodeGenOptions::ProfileInstrKind Kind) const {
+ StringRef Section = getSectionName(Kind);
+ if (SCL->inSection(Section, "!src", FileName))
+ return true;
+ if (SCL->inSection(Section, "src", FileName))
+ return false;
+ return None;
+}
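The filters are llvm::SpecialCaseList files, supplied through the new -fprofile-list= option. An illustrative list for clang instrumentation: isFunctionExcluded() yields false for foo (instrument it), true for bar (skip it), and None for anything unmatched, which then falls back to the default computed in the constructor.

  # profile-list.txt (illustrative), used with -fprofile-instr-generate
  [clang]
  # instrument foo, never instrument bar
  fun:foo
  !fun:bar
  # skip everything under third_party/
  !src:third_party/*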
diff --git a/clang/lib/Basic/SourceLocation.cpp b/clang/lib/Basic/SourceLocation.cpp
index c1fa406909fe..6f6412028d77 100644
--- a/clang/lib/Basic/SourceLocation.cpp
+++ b/clang/lib/Basic/SourceLocation.cpp
@@ -14,6 +14,8 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -40,6 +42,23 @@ void PrettyStackTraceLoc::print(raw_ostream &OS) const {
// SourceLocation
//===----------------------------------------------------------------------===//
+static_assert(std::is_trivially_destructible<SourceLocation>::value,
+ "SourceLocation must be trivially destructible because it is "
+ "used in unions");
+
+static_assert(std::is_trivially_destructible<SourceRange>::value,
+ "SourceRange must be trivially destructible because it is "
+ "used in unions");
+
+unsigned SourceLocation::getHashValue() const {
+ return llvm::DenseMapInfo<unsigned>::getHashValue(ID);
+}
+
+void llvm::FoldingSetTrait<SourceLocation>::Profile(
+ const SourceLocation &X, llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(X.ID);
+}
+
void SourceLocation::print(raw_ostream &OS, const SourceManager &SM)const{
if (!isValid()) {
OS << "<invalid loc>";
@@ -245,7 +264,7 @@ const char *FullSourceLoc::getCharacterData(bool *Invalid) const {
StringRef FullSourceLoc::getBufferData(bool *Invalid) const {
assert(isValid());
- return SrcMgr->getBuffer(SrcMgr->getFileID(*this), Invalid)->getBuffer();
+ return SrcMgr->getBufferData(SrcMgr->getFileID(*this), Invalid);
}
std::pair<FileID, unsigned> FullSourceLoc::getDecomposedLoc() const {
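With hashing and FoldingSet profiling defined, SourceLocation can key hashed containers directly. A sketch, assuming the matching DenseMapInfo<SourceLocation> specialization these hooks feed:

  #include "clang/Basic/SourceLocation.h"
  #include "llvm/ADT/DenseMap.h"

  static unsigned noteUse(llvm::DenseMap<clang::SourceLocation, unsigned> &Seen,
                          clang::SourceLocation Loc) {
    return ++Seen[Loc]; // count how often each location is visited
  }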
diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp
index 0a76c78cd44f..c0b22837693b 100644
--- a/clang/lib/Basic/SourceManager.cpp
+++ b/clang/lib/Basic/SourceManager.cpp
@@ -49,28 +49,22 @@ using llvm::MemoryBuffer;
// SourceManager Helper Classes
//===----------------------------------------------------------------------===//
-ContentCache::~ContentCache() {
- if (shouldFreeBuffer())
- delete Buffer.getPointer();
-}
-
/// getSizeBytesMapped - Returns the number of bytes actually mapped for this
/// ContentCache. This can be 0 if the MemBuffer was not actually expanded.
unsigned ContentCache::getSizeBytesMapped() const {
- return Buffer.getPointer() ? Buffer.getPointer()->getBufferSize() : 0;
+ return Buffer ? Buffer->getBufferSize() : 0;
}
/// Returns the kind of memory used to back the memory buffer for
/// this content cache. This is used for performance analysis.
llvm::MemoryBuffer::BufferKind ContentCache::getMemoryBufferKind() const {
- assert(Buffer.getPointer());
+ assert(Buffer);
// Should be unreachable, but keep for sanity.
- if (!Buffer.getPointer())
+ if (!Buffer)
return llvm::MemoryBuffer::MemoryBuffer_Malloc;
- const llvm::MemoryBuffer *buf = Buffer.getPointer();
- return buf->getBufferKind();
+ return Buffer->getBufferKind();
}
/// getSize - Returns the size of the content encapsulated by this ContentCache.
@@ -78,21 +72,8 @@ llvm::MemoryBuffer::BufferKind ContentCache::getMemoryBufferKind() const {
/// scratch buffer. If the ContentCache encapsulates a source file, that
/// file is not lazily brought in from disk to satisfy this query.
unsigned ContentCache::getSize() const {
- return Buffer.getPointer() ? (unsigned) Buffer.getPointer()->getBufferSize()
- : (unsigned) ContentsEntry->getSize();
-}
-
-void ContentCache::replaceBuffer(const llvm::MemoryBuffer *B, bool DoNotFree) {
- if (B && B == Buffer.getPointer()) {
- assert(0 && "Replacing with the same buffer");
- Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
- return;
- }
-
- if (shouldFreeBuffer())
- delete Buffer.getPointer();
- Buffer.setPointer(B);
- Buffer.setInt((B && DoNotFree) ? DoNotFreeFlag : 0);
+ return Buffer ? (unsigned)Buffer->getBufferSize()
+ : (unsigned)ContentsEntry->getSize();
}
const char *ContentCache::getInvalidBOM(StringRef BufStr) {
@@ -118,44 +99,21 @@ const char *ContentCache::getInvalidBOM(StringRef BufStr) {
return InvalidBOM;
}
-const llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
- FileManager &FM,
- SourceLocation Loc,
- bool *Invalid) const {
+llvm::Optional<llvm::MemoryBufferRef>
+ContentCache::getBufferOrNone(DiagnosticsEngine &Diag, FileManager &FM,
+ SourceLocation Loc) const {
// Lazily create the Buffer for ContentCaches that wrap files. If we already
// computed it, just return what we have.
- if (Buffer.getPointer() || !ContentsEntry) {
- if (Invalid)
- *Invalid = isBufferInvalid();
-
- return Buffer.getPointer();
- }
-
- // Check that the file's size fits in an 'unsigned' (with room for a
- // past-the-end value). This is deeply regrettable, but various parts of
- // Clang (including elsewhere in this file!) use 'unsigned' to represent file
- // offsets, line numbers, string literal lengths, and so on, and fail
- // miserably on large source files.
- if ((uint64_t)ContentsEntry->getSize() >=
- std::numeric_limits<unsigned>::max()) {
- // We can't make a memory buffer of the required size, so just make a small
- // one. We should never hit a situation where we've already parsed to a
- // later offset of the file, so it shouldn't matter that the buffer is
- // smaller than the file.
- Buffer.setPointer(
- llvm::MemoryBuffer::getMemBuffer("", ContentsEntry->getName())
- .release());
- if (Diag.isDiagnosticInFlight())
- Diag.SetDelayedDiagnostic(diag::err_file_too_large,
- ContentsEntry->getName());
- else
- Diag.Report(Loc, diag::err_file_too_large)
- << ContentsEntry->getName();
+ if (IsBufferInvalid)
+ return None;
+ if (Buffer)
+ return Buffer->getMemBufferRef();
+ if (!ContentsEntry)
+ return None;
- Buffer.setInt(Buffer.getInt() | InvalidFlag);
- if (Invalid) *Invalid = true;
- return Buffer.getPointer();
- }
+ // Start with the assumption that the buffer is invalid to simplify early
+ // return paths.
+ IsBufferInvalid = true;
auto BufferOrError = FM.getBufferForFile(ContentsEntry, IsFileVolatile);
@@ -164,20 +122,7 @@ const llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
// exists. Most likely, we were using a stat cache with an invalid entry but
// the file could also have been removed during processing. Since we can't
// really deal with this situation, just create an empty buffer.
- //
- // FIXME: This is definitely not ideal, but our immediate clients can't
- // currently handle returning a null entry here. Ideally we should detect
- // that we are in an inconsistent situation and error out as quickly as
- // possible.
if (!BufferOrError) {
- StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
- auto BackupBuffer = llvm::WritableMemoryBuffer::getNewUninitMemBuffer(
- ContentsEntry->getSize(), "<invalid>");
- char *Ptr = BackupBuffer->getBufferStart();
- for (unsigned i = 0, e = ContentsEntry->getSize(); i != e; ++i)
- Ptr[i] = FillStr[i % FillStr.size()];
- Buffer.setPointer(BackupBuffer.release());
-
if (Diag.isDiagnosticInFlight())
Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
ContentsEntry->getName(),
@@ -186,17 +131,36 @@ const llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
Diag.Report(Loc, diag::err_cannot_open_file)
<< ContentsEntry->getName() << BufferOrError.getError().message();
- Buffer.setInt(Buffer.getInt() | InvalidFlag);
-
- if (Invalid) *Invalid = true;
- return Buffer.getPointer();
+ return None;
}
- Buffer.setPointer(BufferOrError->release());
+ Buffer = std::move(*BufferOrError);
- // Check that the file's size is the same as in the file entry (which may
+ // Check that the file's size fits in an 'unsigned' (with room for a
+ // past-the-end value). This is deeply regrettable, but various parts of
+ // Clang (including elsewhere in this file!) use 'unsigned' to represent file
+ // offsets, line numbers, string literal lengths, and so on, and fail
+ // miserably on large source files.
+ //
+ // Note: ContentsEntry could be a named pipe, in which case
+ // ContentsEntry::getSize() could have the wrong size. Use
+ // MemoryBuffer::getBufferSize() instead.
+ if (Buffer->getBufferSize() >= std::numeric_limits<unsigned>::max()) {
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_file_too_large,
+ ContentsEntry->getName());
+ else
+ Diag.Report(Loc, diag::err_file_too_large)
+ << ContentsEntry->getName();
+
+ return None;
+ }
+
+ // Unless this is a named pipe (in which case we can handle a mismatch),
+ // check that the file's size is the same as in the file entry (which may
// have come from a stat cache).
- if (getRawBuffer()->getBufferSize() != (size_t)ContentsEntry->getSize()) {
+ if (!ContentsEntry->isNamedPipe() &&
+ Buffer->getBufferSize() != (size_t)ContentsEntry->getSize()) {
if (Diag.isDiagnosticInFlight())
Diag.SetDelayedDiagnostic(diag::err_file_modified,
ContentsEntry->getName());
@@ -204,27 +168,24 @@ const llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
Diag.Report(Loc, diag::err_file_modified)
<< ContentsEntry->getName();
- Buffer.setInt(Buffer.getInt() | InvalidFlag);
- if (Invalid) *Invalid = true;
- return Buffer.getPointer();
+ return None;
}
// If the buffer is valid, check to see if it has a UTF Byte Order Mark
// (BOM). We only support UTF-8 with and without a BOM right now. See
// http://en.wikipedia.org/wiki/Byte_order_mark for more information.
- StringRef BufStr = Buffer.getPointer()->getBuffer();
+ StringRef BufStr = Buffer->getBuffer();
const char *InvalidBOM = getInvalidBOM(BufStr);
if (InvalidBOM) {
Diag.Report(Loc, diag::err_unsupported_bom)
<< InvalidBOM << ContentsEntry->getName();
- Buffer.setInt(Buffer.getInt() | InvalidFlag);
+ return None;
}
- if (Invalid)
- *Invalid = isBufferInvalid();
-
- return Buffer.getPointer();
+ // Buffer has been validated.
+ IsBufferInvalid = false;
+ return Buffer->getMemBufferRef();
}
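The new contract replaces the Invalid out-parameter with an Optional: on every failure path a diagnostic has already been issued and None comes back. A caller sketch (process is a placeholder):

  void process(llvm::StringRef Data);

  void visitBuffer(clang::SrcMgr::ContentCache &CC, clang::SourceManager &SM,
                   clang::SourceLocation Loc) {
    if (llvm::Optional<llvm::MemoryBufferRef> B =
            CC.getBufferOrNone(SM.getDiagnostics(), SM.getFileManager(), Loc))
      process(B->getBuffer()); // valid, lazily loaded buffer
    // else: diagnostic already emitted; recover without touching the buffer
  }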
unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
@@ -389,12 +350,11 @@ void SourceManager::clearIDTables() {
createExpansionLoc(SourceLocation(), SourceLocation(), SourceLocation(), 1);
}
-bool SourceManager::isMainFile(FileEntryRef SourceFile) {
+bool SourceManager::isMainFile(const FileEntry &SourceFile) {
assert(MainFileID.isValid() && "expected initialized SourceManager");
- auto FE = getFileEntryRefForID(MainFileID);
- if (!FE)
- return false;
- return FE->getUID() == SourceFile.getUID();
+ if (auto *FE = getFileEntryForID(MainFileID))
+ return FE->getUID() == SourceFile.getUID();
+ return false;
}
void SourceManager::initializeForReplay(const SourceManager &Old) {
@@ -407,7 +367,7 @@ void SourceManager::initializeForReplay(const SourceManager &Old) {
Clone->BufferOverridden = Cache->BufferOverridden;
Clone->IsFileVolatile = Cache->IsFileVolatile;
Clone->IsTransient = Cache->IsTransient;
- Clone->replaceBuffer(Cache->getRawBuffer(), /*DoNotFree*/true);
+ Clone->setUnownedBuffer(Cache->getBufferIfLoaded());
return Clone;
};
@@ -425,16 +385,12 @@ void SourceManager::initializeForReplay(const SourceManager &Old) {
}
}
-/// getOrCreateContentCache - Create or return a cached ContentCache for the
-/// specified file.
-const ContentCache *
-SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
- bool isSystemFile) {
- assert(FileEnt && "Didn't specify a file entry to use?");
-
+ContentCache &SourceManager::getOrCreateContentCache(FileEntryRef FileEnt,
+ bool isSystemFile) {
// Do we already have information about this file?
ContentCache *&Entry = FileInfos[FileEnt];
- if (Entry) return Entry;
+ if (Entry)
+ return *Entry;
// Nope, create a new Cache entry.
Entry = ContentCacheAlloc.Allocate<ContentCache>();
@@ -456,21 +412,21 @@ SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
Entry->IsFileVolatile = UserFilesAreVolatile && !isSystemFile;
Entry->IsTransient = FilesAreTransient;
+ Entry->BufferOverridden |= FileEnt.isNamedPipe();
- return Entry;
+ return *Entry;
}
/// Create a new ContentCache for the specified memory buffer.
/// This does no caching.
-const ContentCache *
-SourceManager::createMemBufferContentCache(const llvm::MemoryBuffer *Buffer,
- bool DoNotFree) {
+ContentCache &SourceManager::createMemBufferContentCache(
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
// Add a new ContentCache to the MemBufferInfos list and return it.
ContentCache *Entry = ContentCacheAlloc.Allocate<ContentCache>();
new (Entry) ContentCache();
MemBufferInfos.push_back(Entry);
- Entry->replaceBuffer(Buffer, DoNotFree);
- return Entry;
+ Entry->setBuffer(std::move(Buffer));
+ return *Entry;
}
const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
@@ -482,9 +438,11 @@ const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
// If the file of the SLocEntry changed we could still have loaded it.
if (!SLocEntryLoaded[Index]) {
// Try to recover; create a SLocEntry so the rest of clang can handle it.
- LoadedSLocEntryTable[Index] = SLocEntry::get(
- 0, FileInfo::get(SourceLocation(), getFakeContentCacheForRecovery(),
- SrcMgr::C_User, ""));
+ if (!FakeSLocEntryForRecovery)
+ FakeSLocEntryForRecovery = std::make_unique<SLocEntry>(SLocEntry::get(
+ 0, FileInfo::get(SourceLocation(), getFakeContentCacheForRecovery(),
+ SrcMgr::C_User, "")));
+ return *FakeSLocEntryForRecovery;
}
}
@@ -507,24 +465,22 @@ SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
/// As part of recovering from missing or changed content, produce a
/// fake, non-empty buffer.
-llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
+llvm::MemoryBufferRef SourceManager::getFakeBufferForRecovery() const {
if (!FakeBufferForRecovery)
FakeBufferForRecovery =
llvm::MemoryBuffer::getMemBuffer("<<<INVALID BUFFER>>>");
- return FakeBufferForRecovery.get();
+ return *FakeBufferForRecovery;
}
/// As part of recovering from missing or changed content, produce a
/// fake content cache.
-const SrcMgr::ContentCache *
-SourceManager::getFakeContentCacheForRecovery() const {
+SrcMgr::ContentCache &SourceManager::getFakeContentCacheForRecovery() const {
if (!FakeContentCacheForRecovery) {
FakeContentCacheForRecovery = std::make_unique<SrcMgr::ContentCache>();
- FakeContentCacheForRecovery->replaceBuffer(getFakeBufferForRecovery(),
- /*DoNotFree=*/true);
+ FakeContentCacheForRecovery->setUnownedBuffer(getFakeBufferForRecovery());
}
- return FakeContentCacheForRecovery.get();
+ return *FakeContentCacheForRecovery;
}
/// Returns the previous in-order FileID or an invalid FileID if there
@@ -576,23 +532,24 @@ FileID SourceManager::createFileID(const FileEntry *SourceFile,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
int LoadedID, unsigned LoadedOffset) {
- assert(SourceFile && "Null source file!");
- const SrcMgr::ContentCache *IR =
- getOrCreateContentCache(SourceFile, isSystem(FileCharacter));
- assert(IR && "getOrCreateContentCache() cannot return NULL");
- return createFileID(IR, SourceFile->getName(), IncludePos, FileCharacter,
- LoadedID, LoadedOffset);
+ return createFileID(SourceFile->getLastRef(), IncludePos, FileCharacter,
+ LoadedID, LoadedOffset);
}
FileID SourceManager::createFileID(FileEntryRef SourceFile,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
int LoadedID, unsigned LoadedOffset) {
- const SrcMgr::ContentCache *IR = getOrCreateContentCache(
- &SourceFile.getFileEntry(), isSystem(FileCharacter));
- assert(IR && "getOrCreateContentCache() cannot return NULL");
- return createFileID(IR, SourceFile.getName(), IncludePos, FileCharacter,
- LoadedID, LoadedOffset);
+ SrcMgr::ContentCache &IR = getOrCreateContentCache(SourceFile,
+ isSystem(FileCharacter));
+
+ // If this is a named pipe, immediately load the buffer to ensure subsequent
+ // calls to ContentCache::getSize() are accurate.
+ if (IR.ContentsEntry->isNamedPipe())
+ (void)IR.getBufferOrNone(Diag, getFileManager(), SourceLocation());
+
+ return createFileIDImpl(IR, SourceFile.getName(), IncludePos, FileCharacter,
+ LoadedID, LoadedOffset);
}
/// Create a new FileID that represents the specified memory buffer.
@@ -604,22 +561,20 @@ FileID SourceManager::createFileID(std::unique_ptr<llvm::MemoryBuffer> Buffer,
int LoadedID, unsigned LoadedOffset,
SourceLocation IncludeLoc) {
StringRef Name = Buffer->getBufferIdentifier();
- return createFileID(
- createMemBufferContentCache(Buffer.release(), /*DoNotFree*/ false),
- Name, IncludeLoc, FileCharacter, LoadedID, LoadedOffset);
+ return createFileIDImpl(createMemBufferContentCache(std::move(Buffer)), Name,
+ IncludeLoc, FileCharacter, LoadedID, LoadedOffset);
}
/// Create a new FileID that represents the specified memory buffer.
///
/// This does not take ownership of the MemoryBuffer. The memory buffer must
/// outlive the SourceManager.
-FileID SourceManager::createFileID(UnownedTag, const llvm::MemoryBuffer *Buffer,
+FileID SourceManager::createFileID(const llvm::MemoryBufferRef &Buffer,
SrcMgr::CharacteristicKind FileCharacter,
int LoadedID, unsigned LoadedOffset,
SourceLocation IncludeLoc) {
- return createFileID(createMemBufferContentCache(Buffer, /*DoNotFree*/ true),
- Buffer->getBufferIdentifier(), IncludeLoc,
- FileCharacter, LoadedID, LoadedOffset);
+ return createFileID(llvm::MemoryBuffer::getMemBuffer(Buffer), FileCharacter,
+ LoadedID, LoadedOffset, IncludeLoc);
}
/// Get the FileID for \p SourceFile if it exists. Otherwise, create a
@@ -635,10 +590,10 @@ SourceManager::getOrCreateFileID(const FileEntry *SourceFile,
/// createFileID - Create a new FileID for the specified ContentCache and
/// include position. This works regardless of whether the ContentCache
/// corresponds to a file or some other input source.
-FileID SourceManager::createFileID(const ContentCache *File, StringRef Filename,
- SourceLocation IncludePos,
- SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID, unsigned LoadedOffset) {
+FileID SourceManager::createFileIDImpl(ContentCache &File, StringRef Filename,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset) {
if (LoadedID < 0) {
assert(LoadedID != -1 && "Loading sentinel FileID");
unsigned Index = unsigned(-LoadedID) - 2;
@@ -649,7 +604,7 @@ FileID SourceManager::createFileID(const ContentCache *File, StringRef Filename,
SLocEntryLoaded[Index] = true;
return FileID::get(LoadedID);
}
- unsigned FileSize = File->getSize();
+ unsigned FileSize = File.getSize();
if (!(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset)) {
Diag.Report(IncludePos, diag::err_include_too_large);
@@ -723,21 +678,18 @@ SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
return SourceLocation::getMacroLoc(NextLocalOffset - (TokLength + 1));
}
-const llvm::MemoryBuffer *
-SourceManager::getMemoryBufferForFile(const FileEntry *File, bool *Invalid) {
- const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
- assert(IR && "getOrCreateContentCache() cannot return NULL");
- return IR->getBuffer(Diag, getFileManager(), SourceLocation(), Invalid);
+llvm::Optional<llvm::MemoryBufferRef>
+SourceManager::getMemoryBufferForFileOrNone(const FileEntry *File) {
+ SrcMgr::ContentCache &IR = getOrCreateContentCache(File->getLastRef());
+ return IR.getBufferOrNone(Diag, getFileManager(), SourceLocation());
}
-void SourceManager::overrideFileContents(const FileEntry *SourceFile,
- llvm::MemoryBuffer *Buffer,
- bool DoNotFree) {
- const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
- assert(IR && "getOrCreateContentCache() cannot return NULL");
+void SourceManager::overrideFileContents(
+ const FileEntry *SourceFile, std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ SrcMgr::ContentCache &IR = getOrCreateContentCache(SourceFile->getLastRef());
- const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(Buffer, DoNotFree);
- const_cast<SrcMgr::ContentCache *>(IR)->BufferOverridden = true;
+ IR.setBuffer(std::move(Buffer));
+ IR.BufferOverridden = true;
getOverriddenFilesInfo().OverriddenFilesWithBuffer.insert(SourceFile);
}
@@ -753,56 +705,51 @@ void SourceManager::overrideFileContents(const FileEntry *SourceFile,
getOverriddenFilesInfo().OverriddenFiles[SourceFile] = NewFile;
}
-const FileEntry *
-SourceManager::bypassFileContentsOverride(const FileEntry &File) {
- assert(isFileOverridden(&File));
- llvm::Optional<FileEntryRef> BypassFile =
- FileMgr.getBypassFile(FileEntryRef(File.getName(), File));
+Optional<FileEntryRef>
+SourceManager::bypassFileContentsOverride(FileEntryRef File) {
+ assert(isFileOverridden(&File.getFileEntry()));
+ llvm::Optional<FileEntryRef> BypassFile = FileMgr.getBypassFile(File);
// If the file can't be found in the FS, give up.
if (!BypassFile)
- return nullptr;
+ return None;
- const FileEntry *FE = &BypassFile->getFileEntry();
- (void)getOrCreateContentCache(FE);
- return FE;
+ (void)getOrCreateContentCache(*BypassFile);
+ return BypassFile;
}
void SourceManager::setFileIsTransient(const FileEntry *File) {
- const SrcMgr::ContentCache *CC = getOrCreateContentCache(File);
- const_cast<SrcMgr::ContentCache *>(CC)->IsTransient = true;
+ getOrCreateContentCache(File->getLastRef()).IsTransient = true;
}
-Optional<FileEntryRef> SourceManager::getFileEntryRefForID(FileID FID) const {
- bool Invalid = false;
- const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
- if (Invalid || !Entry.isFile())
- return None;
-
- const SrcMgr::ContentCache *Content = Entry.getFile().getContentCache();
- if (!Content || !Content->OrigEntry)
- return None;
- return FileEntryRef(Entry.getFile().getName(), *Content->OrigEntry);
+Optional<StringRef>
+SourceManager::getNonBuiltinFilenameForID(FileID FID) const {
+ if (const SrcMgr::SLocEntry *Entry = getSLocEntryForFile(FID))
+ if (Entry->getFile().getContentCache().OrigEntry)
+ return Entry->getFile().getName();
+ return None;
}
StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
- bool MyInvalid = false;
- const SLocEntry &SLoc = getSLocEntry(FID, &MyInvalid);
- if (!SLoc.isFile() || MyInvalid) {
- if (Invalid)
- *Invalid = true;
- return "<<<<<INVALID SOURCE LOCATION>>>>>";
- }
-
- const llvm::MemoryBuffer *Buf = SLoc.getFile().getContentCache()->getBuffer(
- Diag, getFileManager(), SourceLocation(), &MyInvalid);
+ auto B = getBufferDataOrNone(FID);
if (Invalid)
- *Invalid = MyInvalid;
+ *Invalid = !B;
+ return B ? *B : "<<<<<INVALID SOURCE LOCATION>>>>>";
+}
- if (MyInvalid)
- return "<<<<<INVALID SOURCE LOCATION>>>>>";
+llvm::Optional<StringRef>
+SourceManager::getBufferDataIfLoaded(FileID FID) const {
+ if (const SrcMgr::SLocEntry *Entry = getSLocEntryForFile(FID))
+ return Entry->getFile().getContentCache().getBufferDataIfLoaded();
+ return None;
+}
- return Buf->getBuffer();
+llvm::Optional<StringRef> SourceManager::getBufferDataOrNone(FileID FID) const {
+ if (const SrcMgr::SLocEntry *Entry = getSLocEntryForFile(FID))
+ if (auto B = Entry->getFile().getContentCache().getBufferOrNone(
+ Diag, getFileManager(), SourceLocation()))
+ return B->getBuffer();
+ return None;
}
//===----------------------------------------------------------------------===//
@@ -1219,24 +1166,24 @@ const char *SourceManager::getCharacterData(SourceLocation SL,
return "<<<<INVALID BUFFER>>>>";
}
- const llvm::MemoryBuffer *Buffer =
- Entry.getFile().getContentCache()->getBuffer(
- Diag, getFileManager(), SourceLocation(), &CharDataInvalid);
+ llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ Entry.getFile().getContentCache().getBufferOrNone(Diag, getFileManager(),
+ SourceLocation());
if (Invalid)
- *Invalid = CharDataInvalid;
- return Buffer->getBufferStart() + (CharDataInvalid? 0 : LocInfo.second);
+ *Invalid = !Buffer;
+ return Buffer ? Buffer->getBufferStart() + LocInfo.second
+ : "<<<<INVALID BUFFER>>>>";
}
/// getColumnNumber - Return the column # for the specified file position.
/// this is significantly cheaper to compute than the line number.
unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
bool *Invalid) const {
- bool MyInvalid = false;
- const llvm::MemoryBuffer *MemBuf = getBuffer(FID, &MyInvalid);
+ llvm::Optional<llvm::MemoryBufferRef> MemBuf = getBufferOrNone(FID);
if (Invalid)
- *Invalid = MyInvalid;
+ *Invalid = !MemBuf;
- if (MyInvalid)
+ if (!MemBuf)
return 1;
// It is okay to request a position just past the end of the buffer.
@@ -1249,10 +1196,10 @@ unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
const char *Buf = MemBuf->getBufferStart();
// See if we just calculated the line number for this FilePos and can use
// that to lookup the start of the line instead of searching for it.
- if (LastLineNoFileIDQuery == FID &&
- LastLineNoContentCache->SourceLineCache != nullptr &&
- LastLineNoResult < LastLineNoContentCache->NumLines) {
- unsigned *SourceLineCache = LastLineNoContentCache->SourceLineCache;
+ if (LastLineNoFileIDQuery == FID && LastLineNoContentCache->SourceLineCache &&
+ LastLineNoResult < LastLineNoContentCache->SourceLineCache.size()) {
+ const unsigned *SourceLineCache =
+ LastLineNoContentCache->SourceLineCache.begin();
unsigned LineStart = SourceLineCache[LastLineNoResult - 1];
unsigned LineEnd = SourceLineCache[LastLineNoResult];
if (FilePos >= LineStart && FilePos < LineEnd) {
@@ -1309,19 +1256,8 @@ unsigned SourceManager::getPresumedColumnNumber(SourceLocation Loc,
#include <emmintrin.h>
#endif
-static LLVM_ATTRIBUTE_NOINLINE void
-ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
- llvm::BumpPtrAllocator &Alloc,
- const SourceManager &SM, bool &Invalid);
-static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
- llvm::BumpPtrAllocator &Alloc,
- const SourceManager &SM, bool &Invalid) {
- // Note that calling 'getBuffer()' may lazily page in the file.
- const MemoryBuffer *Buffer =
- FI->getBuffer(Diag, SM.getFileManager(), SourceLocation(), &Invalid);
- if (Invalid)
- return;
-
+LineOffsetMapping LineOffsetMapping::get(llvm::MemoryBufferRef Buffer,
+ llvm::BumpPtrAllocator &Alloc) {
// Find the file offsets of all of the *physical* source lines. This does
// not look at trigraphs, escaped newlines, or anything else tricky.
SmallVector<unsigned, 256> LineOffsets;
@@ -1329,8 +1265,8 @@ static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
// Line #1 starts at char 0.
LineOffsets.push_back(0);
- const unsigned char *Buf = (const unsigned char *)Buffer->getBufferStart();
- const unsigned char *End = (const unsigned char *)Buffer->getBufferEnd();
+ const unsigned char *Buf = (const unsigned char *)Buffer.getBufferStart();
+ const unsigned char *End = (const unsigned char *)Buffer.getBufferEnd();
const std::size_t BufLen = End - Buf;
unsigned I = 0;
while (I < BufLen) {
@@ -1345,10 +1281,14 @@ static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
++I;
}
- // Copy the offsets into the FileInfo structure.
- FI->NumLines = LineOffsets.size();
- FI->SourceLineCache = Alloc.Allocate<unsigned>(LineOffsets.size());
- std::copy(LineOffsets.begin(), LineOffsets.end(), FI->SourceLineCache);
+ return LineOffsetMapping(LineOffsets, Alloc);
+}
+
+LineOffsetMapping::LineOffsetMapping(ArrayRef<unsigned> LineOffsets,
+ llvm::BumpPtrAllocator &Alloc)
+ : Storage(Alloc.Allocate<unsigned>(LineOffsets.size() + 1)) {
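+  // Layout of the single allocation: Storage[0] holds the number of lines and
+  // the per-line start offsets follow at Storage[1] onwards, which lets
+  // begin(), end() and size() work without a separate size field.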
+ Storage[0] = LineOffsets.size();
+ std::copy(LineOffsets.begin(), LineOffsets.end(), Storage + 1);
}
/// getLineNumber - Given a SourceLocation, return the spelling line number
@@ -1363,7 +1303,7 @@ unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
return 1;
}
- ContentCache *Content;
+ const ContentCache *Content;
if (LastLineNoFileIDQuery == FID)
Content = LastLineNoContentCache;
else {
@@ -1375,26 +1315,29 @@ unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
return 1;
}
- Content = const_cast<ContentCache*>(Entry.getFile().getContentCache());
+ Content = &Entry.getFile().getContentCache();
}
// If this is the first use of line information for this buffer, compute the
// SourceLineCache for it on demand.
if (!Content->SourceLineCache) {
- bool MyInvalid = false;
- ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
+ llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ Content->getBufferOrNone(Diag, getFileManager(), SourceLocation());
if (Invalid)
- *Invalid = MyInvalid;
- if (MyInvalid)
+ *Invalid = !Buffer;
+ if (!Buffer)
return 1;
+
+ Content->SourceLineCache =
+ LineOffsetMapping::get(*Buffer, ContentCacheAlloc);
} else if (Invalid)
*Invalid = false;
// Okay, we know we have a line number table. Do a binary search to find the
// line number that this character position lands on.
- unsigned *SourceLineCache = Content->SourceLineCache;
- unsigned *SourceLineCacheStart = SourceLineCache;
- unsigned *SourceLineCacheEnd = SourceLineCache + Content->NumLines;
+ const unsigned *SourceLineCache = Content->SourceLineCache.begin();
+ const unsigned *SourceLineCacheStart = SourceLineCache;
+ const unsigned *SourceLineCacheEnd = Content->SourceLineCache.end();
unsigned QueriedFilePos = FilePos+1;
@@ -1433,13 +1376,13 @@ unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
}
}
} else {
- if (LastLineNoResult < Content->NumLines)
+ if (LastLineNoResult < Content->SourceLineCache.size())
SourceLineCacheEnd = SourceLineCache+LastLineNoResult+1;
}
}
- unsigned *Pos
- = std::lower_bound(SourceLineCache, SourceLineCacheEnd, QueriedFilePos);
+ const unsigned *Pos =
+ std::lower_bound(SourceLineCache, SourceLineCacheEnd, QueriedFilePos);
unsigned LineNo = Pos-SourceLineCacheStart;
LastLineNoFileIDQuery = FID;
@@ -1480,12 +1423,11 @@ SrcMgr::CharacteristicKind
SourceManager::getFileCharacteristic(SourceLocation Loc) const {
assert(Loc.isValid() && "Can't get file characteristic of invalid loc!");
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
- bool Invalid = false;
- const SLocEntry &SEntry = getSLocEntry(LocInfo.first, &Invalid);
- if (Invalid || !SEntry.isFile())
+ const SLocEntry *SEntry = getSLocEntryForFile(LocInfo.first);
+ if (!SEntry)
return C_User;
- const SrcMgr::FileInfo &FI = SEntry.getFile();
+ const SrcMgr::FileInfo &FI = SEntry->getFile();
// If there are no #line directives in this file, just return the whole-file
// state.
@@ -1511,7 +1453,10 @@ StringRef SourceManager::getBufferName(SourceLocation Loc,
bool *Invalid) const {
if (isInvalid(Loc, Invalid)) return "<invalid loc>";
- return getBuffer(getFileID(Loc), Invalid)->getBufferIdentifier();
+ auto B = getBufferOrNone(getFileID(Loc));
+ if (Invalid)
+ *Invalid = !B;
+ return B ? B->getBufferIdentifier() : "<invalid buffer>";
}
/// getPresumedLoc - This method returns the "presumed" location of a
@@ -1534,7 +1479,7 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
return PresumedLoc();
const SrcMgr::FileInfo &FI = Entry.getFile();
- const SrcMgr::ContentCache *C = FI.getContentCache();
+ const SrcMgr::ContentCache *C = &FI.getContentCache();
// To get the source name, first consult the FileEntry (if one exists)
// before the MemBuffer as this will avoid unnecessarily paging in the
@@ -1543,8 +1488,8 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
StringRef Filename;
if (C->OrigEntry)
Filename = C->OrigEntry->getName();
- else
- Filename = C->getBuffer(Diag, getFileManager())->getBufferIdentifier();
+ else if (auto Buffer = C->getBufferOrNone(Diag, getFileManager()))
+ Filename = Buffer->getBufferIdentifier();
unsigned LineNo = getLineNumber(LocInfo.first, LocInfo.second, &Invalid);
if (Invalid)
@@ -1603,12 +1548,11 @@ bool SourceManager::isInMainFile(SourceLocation Loc) const {
// Presumed locations are always for expansion points.
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
- bool Invalid = false;
- const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
- if (Invalid || !Entry.isFile())
+ const SLocEntry *Entry = getSLocEntryForFile(LocInfo.first);
+ if (!Entry)
return false;
- const SrcMgr::FileInfo &FI = Entry.getFile();
+ const SrcMgr::FileInfo &FI = Entry->getFile();
// Check if there is a line directive for this location.
if (FI.hasLineDirectives())
@@ -1673,9 +1617,7 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
return FileID();
if (MainSLoc.isFile()) {
- const ContentCache *MainContentCache =
- MainSLoc.getFile().getContentCache();
- if (MainContentCache && MainContentCache->OrigEntry == SourceFile)
+ if (MainSLoc.getFile().getContentCache().OrigEntry == SourceFile)
return MainFileID;
}
}
@@ -1684,16 +1626,16 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
// through all of the local source locations.
for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
const SLocEntry &SLoc = getLocalSLocEntry(I);
- if (SLoc.isFile() && SLoc.getFile().getContentCache() &&
- SLoc.getFile().getContentCache()->OrigEntry == SourceFile)
+ if (SLoc.isFile() &&
+ SLoc.getFile().getContentCache().OrigEntry == SourceFile)
return FileID::get(I);
}
// If that still didn't help, try the modules.
for (unsigned I = 0, N = loaded_sloc_entry_size(); I != N; ++I) {
const SLocEntry &SLoc = getLoadedSLocEntry(I);
- if (SLoc.isFile() && SLoc.getFile().getContentCache() &&
- SLoc.getFile().getContentCache()->OrigEntry == SourceFile)
+ if (SLoc.isFile() &&
+ SLoc.getFile().getContentCache().OrigEntry == SourceFile)
return FileID::get(-int(I) - 2);
}
@@ -1725,28 +1667,25 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
if (Line == 1 && Col == 1)
return FileLoc;
- ContentCache *Content
- = const_cast<ContentCache *>(Entry.getFile().getContentCache());
- if (!Content)
- return SourceLocation();
+ const ContentCache *Content = &Entry.getFile().getContentCache();
// If this is the first use of line information for this buffer, compute the
// SourceLineCache for it on demand.
- if (!Content->SourceLineCache) {
- bool MyInvalid = false;
- ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
- if (MyInvalid)
- return SourceLocation();
- }
+ llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ Content->getBufferOrNone(Diag, getFileManager());
+ if (!Buffer)
+ return SourceLocation();
+ if (!Content->SourceLineCache)
+ Content->SourceLineCache =
+ LineOffsetMapping::get(*Buffer, ContentCacheAlloc);
- if (Line > Content->NumLines) {
- unsigned Size = Content->getBuffer(Diag, getFileManager())->getBufferSize();
+ if (Line > Content->SourceLineCache.size()) {
+ unsigned Size = Buffer->getBufferSize();
if (Size > 0)
--Size;
return FileLoc.getLocWithOffset(Size);
}
- const llvm::MemoryBuffer *Buffer = Content->getBuffer(Diag, getFileManager());
unsigned FilePos = Content->SourceLineCache[Line - 1];
const char *Buf = Buffer->getBufferStart() + FilePos;
unsigned BufLength = Buffer->getBufferSize() - FilePos;
@@ -1791,13 +1730,18 @@ void SourceManager::computeMacroArgsCache(MacroArgsMap &MacroArgsCache,
if (Invalid)
return;
if (Entry.isFile()) {
- SourceLocation IncludeLoc = Entry.getFile().getIncludeLoc();
+      auto &File = Entry.getFile();
+ if (File.getFileCharacteristic() == C_User_ModuleMap ||
+ File.getFileCharacteristic() == C_System_ModuleMap)
+ continue;
+
+ SourceLocation IncludeLoc = File.getIncludeLoc();
bool IncludedInFID =
(IncludeLoc.isValid() && isInFileID(IncludeLoc, FID)) ||
// Predefined header doesn't have a valid include location in main
// file, but any files created by it should still be skipped when
// computing macro args expanded in the main file.
- (FID == MainFileID && Entry.getFile().Filename == "<built-in>");
+ (FID == MainFileID && Entry.getFile().getName() == "<built-in>");
if (IncludedInFID) {
// Skip the files/macros of the #include'd file, we only care about
// macros that lexed macro arguments from our file.
@@ -1936,6 +1880,11 @@ SourceManager::getMacroArgExpandedLocation(SourceLocation Loc) const {
assert(!MacroArgsCache->empty());
MacroArgsMap::iterator I = MacroArgsCache->upper_bound(Offset);
+ // In case every element in MacroArgsCache is greater than Offset we can't
+ // decrement the iterator.
+ if (I == MacroArgsCache->begin())
+ return Loc;
+
--I;
unsigned MacroArgBeginOffs = I->first;
@@ -2040,8 +1989,8 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
// If we arrived here, the location is either in a built-ins buffer or
// associated with global inline asm. PR5662 and PR22576 are examples.
- StringRef LB = getBuffer(LOffs.first)->getBufferIdentifier();
- StringRef RB = getBuffer(ROffs.first)->getBufferIdentifier();
+ StringRef LB = getBufferOrFake(LOffs.first).getBufferIdentifier();
+ StringRef RB = getBufferOrFake(ROffs.first).getBufferIdentifier();
bool LIsBuiltins = LB == "<built-in>";
bool RIsBuiltins = RB == "<built-in>";
// Sort built-in before non-built-in.
@@ -2142,7 +2091,7 @@ void SourceManager::PrintStats() const {
unsigned NumLineNumsComputed = 0;
unsigned NumFileBytesMapped = 0;
for (fileinfo_iterator I = fileinfo_begin(), E = fileinfo_end(); I != E; ++I){
- NumLineNumsComputed += I->second->SourceLineCache != nullptr;
+ NumLineNumsComputed += bool(I->second->SourceLineCache);
NumFileBytesMapped += I->second->getSizeBytesMapped();
}
unsigned NumMacroArgsComputed = MacroArgsCacheMap.size();
@@ -2172,16 +2121,15 @@ LLVM_DUMP_METHOD void SourceManager::dump() const {
<< ">\n";
if (FI.getIncludeLoc().isValid())
out << " included from " << FI.getIncludeLoc().getOffset() << "\n";
- if (auto *CC = FI.getContentCache()) {
- out << " for " << (CC->OrigEntry ? CC->OrigEntry->getName() : "<none>")
+ auto &CC = FI.getContentCache();
+ out << " for " << (CC.OrigEntry ? CC.OrigEntry->getName() : "<none>")
+ << "\n";
+ if (CC.BufferOverridden)
+ out << " contents overridden\n";
+ if (CC.ContentsEntry != CC.OrigEntry) {
+ out << " contents from "
+ << (CC.ContentsEntry ? CC.ContentsEntry->getName() : "<none>")
<< "\n";
- if (CC->BufferOverridden)
- out << " contents overridden\n";
- if (CC->ContentsEntry != CC->OrigEntry) {
- out << " contents from "
- << (CC->ContentsEntry ? CC->ContentsEntry->getName() : "<none>")
- << "\n";
- }
}
} else {
auto &EI = Entry.getExpansion();
diff --git a/clang/lib/Basic/TargetID.cpp b/clang/lib/Basic/TargetID.cpp
new file mode 100644
index 000000000000..59d416f0e015
--- /dev/null
+++ b/clang/lib/Basic/TargetID.cpp
@@ -0,0 +1,169 @@
+//===--- TargetID.cpp - Utilities for parsing target ID -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetID.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+
+namespace clang {
+
+static const llvm::SmallVector<llvm::StringRef, 4>
+getAllPossibleAMDGPUTargetIDFeatures(const llvm::Triple &T,
+ llvm::StringRef Proc) {
+ // Entries in returned vector should be in alphabetical order.
+ llvm::SmallVector<llvm::StringRef, 4> Ret;
+ auto ProcKind = T.isAMDGCN() ? llvm::AMDGPU::parseArchAMDGCN(Proc)
+ : llvm::AMDGPU::parseArchR600(Proc);
+ if (ProcKind == llvm::AMDGPU::GK_NONE)
+ return Ret;
+ auto Features = T.isAMDGCN() ? llvm::AMDGPU::getArchAttrAMDGCN(ProcKind)
+ : llvm::AMDGPU::getArchAttrR600(ProcKind);
+ if (Features & llvm::AMDGPU::FEATURE_SRAMECC)
+ Ret.push_back("sramecc");
+ if (Features & llvm::AMDGPU::FEATURE_XNACK)
+ Ret.push_back("xnack");
+ return Ret;
+}
+
+const llvm::SmallVector<llvm::StringRef, 4>
+getAllPossibleTargetIDFeatures(const llvm::Triple &T,
+ llvm::StringRef Processor) {
+ llvm::SmallVector<llvm::StringRef, 4> Ret;
+ if (T.isAMDGPU())
+ return getAllPossibleAMDGPUTargetIDFeatures(T, Processor);
+ return Ret;
+}
+
+/// Returns the canonical processor name, or an empty string if \p Processor
+/// is invalid.
+static llvm::StringRef getCanonicalProcessorName(const llvm::Triple &T,
+ llvm::StringRef Processor) {
+ if (T.isAMDGPU())
+ return llvm::AMDGPU::getCanonicalArchName(T, Processor);
+ return Processor;
+}
+
+llvm::StringRef getProcessorFromTargetID(const llvm::Triple &T,
+ llvm::StringRef TargetID) {
+ auto Split = TargetID.split(':');
+ return getCanonicalProcessorName(T, Split.first);
+}
+
+// Parse a target ID with format checking only. Do not check whether the
+// processor name or the features are valid for the processor.
+//
+// A target ID is a processor name followed by a list of target features
+// delimited by colons. Each target feature is a string postfixed by a plus
+// or minus sign, e.g. gfx908:sramecc+:xnack-.
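+//
+// For example, "gfx908:sramecc+:xnack-" parses to the processor "gfx908" with
+// the feature map {sramecc: true, xnack: false}; a feature may appear at most
+// once, so "gfx908:xnack+:xnack+" is rejected.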
+static llvm::Optional<llvm::StringRef>
+parseTargetIDWithFormatCheckingOnly(llvm::StringRef TargetID,
+ llvm::StringMap<bool> *FeatureMap) {
+ llvm::StringRef Processor;
+
+ if (TargetID.empty())
+ return llvm::StringRef();
+
+ auto Split = TargetID.split(':');
+ Processor = Split.first;
+ if (Processor.empty())
+ return llvm::None;
+
+ auto Features = Split.second;
+ if (Features.empty())
+ return Processor;
+
+ llvm::StringMap<bool> LocalFeatureMap;
+ if (!FeatureMap)
+ FeatureMap = &LocalFeatureMap;
+
+ while (!Features.empty()) {
+ auto Splits = Features.split(':');
+ auto Sign = Splits.first.back();
+ auto Feature = Splits.first.drop_back();
+ if (Sign != '+' && Sign != '-')
+ return llvm::None;
+ bool IsOn = Sign == '+';
+ auto Loc = FeatureMap->find(Feature);
+ // Each feature can only show up at most once in target ID.
+ if (Loc != FeatureMap->end())
+ return llvm::None;
+ (*FeatureMap)[Feature] = IsOn;
+ Features = Splits.second;
+ }
+ return Processor;
+}
+
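+// Parses and validates a target ID against the given triple: the processor
+// name is canonicalized, and a feature that is not a valid target ID feature
+// for that processor makes the whole target ID invalid.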
+llvm::Optional<llvm::StringRef>
+parseTargetID(const llvm::Triple &T, llvm::StringRef TargetID,
+ llvm::StringMap<bool> *FeatureMap) {
+ auto OptionalProcessor =
+ parseTargetIDWithFormatCheckingOnly(TargetID, FeatureMap);
+
+ if (!OptionalProcessor)
+ return llvm::None;
+
+ llvm::StringRef Processor =
+ getCanonicalProcessorName(T, OptionalProcessor.getValue());
+ if (Processor.empty())
+ return llvm::None;
+
+ llvm::SmallSet<llvm::StringRef, 4> AllFeatures;
+ for (auto &&F : getAllPossibleTargetIDFeatures(T, Processor))
+ AllFeatures.insert(F);
+
+ for (auto &&F : *FeatureMap)
+ if (!AllFeatures.count(F.first()))
+ return llvm::None;
+
+ return Processor;
+}
+
+// A canonical target ID is a target ID containing a canonical processor name
+// and features in alphabetical order.
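+// For example, the processor "gfx908" with the feature map {xnack: true,
+// sramecc: false} yields "gfx908:sramecc-:xnack+".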
+std::string getCanonicalTargetID(llvm::StringRef Processor,
+ const llvm::StringMap<bool> &Features) {
+ std::string TargetID = Processor.str();
+ std::map<const llvm::StringRef, bool> OrderedMap;
+ for (const auto &F : Features)
+ OrderedMap[F.first()] = F.second;
+ for (auto F : OrderedMap)
+ TargetID = TargetID + ':' + F.first.str() + (F.second ? "+" : "-");
+ return TargetID;
+}
+
+// For a specific processor, a feature either shows up in all target IDs or in
+// none of them; otherwise the target ID combination is invalid.
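+// For example, {"gfx908:xnack+", "gfx908"} is a conflicting combination, since
+// xnack shows up in the first target ID but not in the second.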
+llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+getConflictTargetIDCombination(const std::set<llvm::StringRef> &TargetIDs) {
+ struct Info {
+ llvm::StringRef TargetID;
+ llvm::StringMap<bool> Features;
+ };
+ llvm::StringMap<Info> FeatureMap;
+ for (auto &&ID : TargetIDs) {
+ llvm::StringMap<bool> Features;
+ llvm::StringRef Proc =
+ parseTargetIDWithFormatCheckingOnly(ID, &Features).getValue();
+ auto Loc = FeatureMap.find(Proc);
+ if (Loc == FeatureMap.end())
+ FeatureMap[Proc] = Info{ID, Features};
+ else {
+ auto &ExistingFeatures = Loc->second.Features;
+ if (llvm::any_of(Features, [&](auto &F) {
+ return ExistingFeatures.count(F.first()) == 0;
+ }))
+ return std::make_pair(Loc->second.TargetID, ID);
+ }
+ }
+ return llvm::None;
+}
+
+} // namespace clang
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index eccdc21d724a..642ee753d224 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -115,6 +115,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
HasBuiltinMSVaList = false;
IsRenderScriptTarget = false;
HasAArch64SVETypes = false;
+ AllowAMDGPUUnsafeFPAtomics = false;
ARMCDECoprocMask = 0;
// Default to no types using fpret.
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index 6bbcafa27dfe..90a67d03b7b2 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -334,6 +334,16 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new PPC32TargetInfo(Triple, Opts);
}
+ case llvm::Triple::ppcle:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ default:
+ return new PPC32TargetInfo(Triple, Opts);
+ }
+
case llvm::Triple::ppc64:
if (Triple.isOSDarwin())
return new DarwinPPC64TargetInfo(Triple, Opts);
@@ -346,6 +356,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
case llvm::Triple::AIX:
return new AIXPPC64TargetInfo(Triple, Opts);
default:
@@ -356,8 +368,12 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
switch (os) {
case llvm::Triple::Linux:
return new LinuxTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
default:
return new PPC64TargetInfo(Triple, Opts);
}
@@ -387,6 +403,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
switch (os) {
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<RISCV64TargetInfo>(Triple, Opts);
case llvm::Triple::Fuchsia:
return new FuchsiaTargetInfo<RISCV64TargetInfo>(Triple, Opts);
case llvm::Triple::Linux:
@@ -403,8 +421,6 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new SolarisTargetInfo<SparcV8TargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<SparcV8TargetInfo>(Triple, Opts);
- case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<SparcV8TargetInfo>(Triple, Opts);
case llvm::Triple::RTEMS:
return new RTEMSTargetInfo<SparcV8TargetInfo>(Triple, Opts);
default:
@@ -418,8 +434,6 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
- case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
case llvm::Triple::RTEMS:
return new RTEMSTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
default:
@@ -446,6 +460,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
switch (os) {
case llvm::Triple::Linux:
return new LinuxTargetInfo<SystemZTargetInfo>(Triple, Opts);
+ case llvm::Triple::ZOS:
+ return new ZOSTargetInfo<SystemZTargetInfo>(Triple, Opts);
default:
return new SystemZTargetInfo(Triple, Opts);
}
@@ -648,6 +664,17 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
return nullptr;
}
+ // Check the TuneCPU name if specified.
+ if (!Opts->TuneCPU.empty() &&
+ !Target->isValidTuneCPUName(Opts->TuneCPU)) {
+ Diags.Report(diag::err_target_unknown_cpu) << Opts->TuneCPU;
+ SmallVector<StringRef, 32> ValidList;
+ Target->fillValidTuneCPUList(ValidList);
+ if (!ValidList.empty())
+ Diags.Report(diag::note_valid_options) << llvm::join(ValidList, ", ");
+ return nullptr;
+ }
+
// Set the target ABI if specified.
if (!Opts->ABI.empty() && !Target->setABI(Opts->ABI)) {
Diags.Report(diag::err_target_unknown_abi) << Opts->ABI;
@@ -662,14 +689,13 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
// Compute the default target features, we need the target to handle this
// because features may have dependencies on one another.
- llvm::StringMap<bool> Features;
- if (!Target->initFeatureMap(Features, Diags, Opts->CPU,
+ if (!Target->initFeatureMap(Opts->FeatureMap, Diags, Opts->CPU,
Opts->FeaturesAsWritten))
return nullptr;
// Add the features to the compile options.
Opts->Features.clear();
- for (const auto &F : Features)
+ for (const auto &F : Opts->FeatureMap)
Opts->Features.push_back((F.getValue() ? "+" : "-") + F.getKey().str());
// Sort here, so we handle the features in a predictable order. (This matters
// when we're dealing with features that overlap.)
@@ -679,7 +705,7 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
return nullptr;
Target->setSupportedOpenCLOpts();
- Target->setOpenCLExtensionOpts();
+ Target->setCommandLineOpenCLOpts();
Target->setMaxAtomicWidth();
if (!Target->validateTarget(Diags))
@@ -689,3 +715,29 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
return Target.release();
}
+
+/// getOpenCLFeatureDefines - Define OpenCL macros based on target settings
+/// and language version
+void TargetInfo::getOpenCLFeatureDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+
+ auto defineOpenCLExtMacro = [&](llvm::StringRef Name, unsigned AvailVer,
+ unsigned CoreVersions,
+ unsigned OptionalVersions) {
+ // Check if extension is supported by target and is available in this
+ // OpenCL version
+ auto It = getTargetOpts().OpenCLFeaturesMap.find(Name);
+ if ((It != getTargetOpts().OpenCLFeaturesMap.end()) && It->getValue() &&
+ OpenCLOptions::OpenCLOptionInfo(AvailVer, CoreVersions,
+ OptionalVersions)
+ .isAvailableIn(Opts))
+ Builder.defineMacro(Name);
+ };
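+  // The include below invokes defineOpenCLExtMacro once per extension
+  // declared via OPENCL_GENERIC_EXTENSION in clang/Basic/OpenCLExtensions.def,
+  // so an extension macro such as cl_khr_fp64 is only defined when the target
+  // supports the extension and it is available in this OpenCL version.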
+#define OPENCL_GENERIC_EXTENSION(Ext, Avail, Core, Opt) \
+ defineOpenCLExtMacro(#Ext, Avail, Core, Opt);
+#include "clang/Basic/OpenCLExtensions.def"
+
+  // FIXME: OpenCL options which affect language semantics/syntax should be
+  // moved into LangOptions; macro definitions for such options are better
+  // done in clang::InitializePreprocessor.
+}
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 25c02cb888c1..f17134623b8b 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -155,8 +155,9 @@ void AArch64TargetInfo::fillValidCPUList(
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
- // FIXME: Armv8.1 makes __ARM_FEATURE_CRC32 mandatory. Handle it here.
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
+ Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
+ Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
@@ -176,8 +177,6 @@ void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Also include the Armv8.3 defines
- // FIXME: Armv8.4 makes __ARM_FEATURE_ATOMICS, defined in GCC, mandatory.
- // Add and handle it here.
getTargetDefinesARMV83A(Opts, Builder);
}
@@ -197,6 +196,12 @@ void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
getTargetDefinesARMV85A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.6 defines
+ getTargetDefinesARMV86A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
@@ -304,6 +309,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasMatMul)
Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
+ if (HasLSE)
+ Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
+
if (HasBFloat16) {
Builder.defineMacro("__ARM_FEATURE_BF16", "1");
Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
@@ -348,6 +356,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.BranchTargetEnforcement)
Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
+ if (HasLS64)
+ Builder.defineMacro("__ARM_FEATURE_LS64", "1");
+
switch (ArchKind) {
default:
break;
@@ -369,6 +380,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::AArch64::ArchKind::ARMV8_6A:
getTargetDefinesARMV86A(Opts, Builder);
break;
+ case llvm::AArch64::ArchKind::ARMV8_7A:
+ getTargetDefinesARMV87A(Opts, Builder);
+ break;
}
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
@@ -376,6 +390,11 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+
+ if (Opts.ArmSveVectorBits) {
+ Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
+ Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
+ }
}
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
@@ -404,6 +423,7 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFP16FML = false;
HasMTE = false;
HasTME = false;
+ HasLS64 = false;
HasMatMul = false;
HasBFloat16 = false;
HasSVE2 = false;
@@ -413,6 +433,7 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasSVE2BitPerm = false;
HasMatmulFP64 = false;
HasMatmulFP32 = false;
+ HasLSE = false;
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
@@ -478,6 +499,10 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
if (Feature == "+v8.6a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
+ if (Feature == "+v8.7a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
+ if (Feature == "+v8r")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8R;
if (Feature == "+fullfp16")
HasFullFP16 = true;
if (Feature == "+dotprod")
@@ -488,10 +513,18 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMTE = true;
if (Feature == "+tme")
HasTME = true;
+ if (Feature == "+pauth")
+ HasPAuth = true;
if (Feature == "+i8mm")
HasMatMul = true;
if (Feature == "+bf16")
HasBFloat16 = true;
+ if (Feature == "+lse")
+ HasLSE = true;
+ if (Feature == "+ls64")
+ HasLS64 = true;
+ if (Feature == "+flagm")
+ HasFlagM = true;
}
setDataLayout();
@@ -754,7 +787,9 @@ WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
}
void WindowsARM64TargetInfo::setDataLayout() {
- resetDataLayout("e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
+ resetDataLayout(Triple.isOSBinFormatMachO()
+ ? "e-m:o-i64:64-i128:128-n32:64-S128"
+ : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
}
TargetInfo::BuiltinVaListKind
@@ -843,7 +878,7 @@ DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
UseZeroLengthBitfieldAlignment = true;
TheCXXABI.set(TargetCXXABI::WatchOS);
} else
- TheCXXABI.set(TargetCXXABI::iOS64);
+ TheCXXABI.set(TargetCXXABI::AppleARM64);
}
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
@@ -860,6 +895,9 @@ void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
Builder.defineMacro("__arm64", "1");
Builder.defineMacro("__arm64__", "1");
+ if (Triple.isArm64e())
+ Builder.defineMacro("__arm64e__", "1");
+
getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index d1982897d84e..2809fbce9c88 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -36,6 +36,8 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasFP16FML;
bool HasMTE;
bool HasTME;
+ bool HasPAuth;
+ bool HasLS64;
bool HasMatMul;
bool HasSVE2;
bool HasSVE2AES;
@@ -44,6 +46,8 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasSVE2BitPerm;
bool HasMatmulFP64;
bool HasMatmulFP32;
+ bool HasLSE;
+ bool HasFlagM;
llvm::AArch64::ArchKind ArchKind;
@@ -80,6 +84,8 @@ public:
MacroBuilder &Builder) const;
void getTargetDefinesARMV86A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV87A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index db7db8d36d03..91c1e83f61cb 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -31,12 +31,12 @@ namespace targets {
static const char *const DataLayoutStringR600 =
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
- "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
static const char *const DataLayoutStringAMDGCN =
"e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
"-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
- "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
"-ni:7";
const LangASMap AMDGPUTargetInfo::AMDGPUDefIsGenMap = {
@@ -46,6 +46,8 @@ const LangASMap AMDGPUTargetInfo::AMDGPUDefIsGenMap = {
Constant, // opencl_constant
Private, // opencl_private
Generic, // opencl_generic
+ Global, // opencl_global_device
+ Global, // opencl_global_host
Global, // cuda_device
Constant, // cuda_constant
Local, // cuda_shared
@@ -61,6 +63,8 @@ const LangASMap AMDGPUTargetInfo::AMDGPUDefIsPrivMap = {
Constant, // opencl_constant
Private, // opencl_private
Generic, // opencl_generic
+ Global, // opencl_global_device
+ Global, // opencl_global_host
Global, // cuda_device
Constant, // cuda_constant
Local, // cuda_shared
@@ -170,6 +174,9 @@ bool AMDGPUTargetInfo::initFeatureMap(
// XXX - What does the member GPU mean if device name string passed here?
if (isAMDGCN(getTriple())) {
switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
+ case GK_GFX1033:
+ case GK_GFX1032:
+ case GK_GFX1031:
case GK_GFX1030:
Features["ci-insts"] = true;
Features["dot1-insts"] = true;
@@ -216,6 +223,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["dot1-insts"] = true;
Features["dot2-insts"] = true;
LLVM_FALLTHROUGH;
+ case GK_GFX90C:
case GK_GFX909:
case GK_GFX904:
case GK_GFX902:
@@ -223,6 +231,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["gfx9-insts"] = true;
LLVM_FALLTHROUGH;
case GK_GFX810:
+ case GK_GFX805:
case GK_GFX803:
case GK_GFX802:
case GK_GFX801:
@@ -231,6 +240,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["dpp"] = true;
Features["s-memrealtime"] = true;
LLVM_FALLTHROUGH;
+ case GK_GFX705:
case GK_GFX704:
case GK_GFX703:
case GK_GFX702:
@@ -239,6 +249,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["ci-insts"] = true;
Features["flat-address-space"] = true;
LLVM_FALLTHROUGH;
+ case GK_GFX602:
case GK_GFX601:
case GK_GFX600:
break;
@@ -311,6 +322,8 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
HasLegalHalfType = true;
HasFloat16 = true;
+ WavefrontSize = GPUFeatures & llvm::AMDGPU::FEATURE_WAVE32 ? 32 : 64;
+ AllowAMDGPUUnsafeFPAtomics = Opts.AllowAMDGPUUnsafeFPAtomics;
// Set pointer width and alignment for target address space 0.
PointerWidth = PointerAlign = DataLayout->getPointerSizeInBits();
@@ -352,6 +365,23 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
StringRef CanonName = isAMDGCN(getTriple()) ?
getArchNameAMDGCN(GPUKind) : getArchNameR600(GPUKind);
Builder.defineMacro(Twine("__") + Twine(CanonName) + Twine("__"));
+ if (isAMDGCN(getTriple())) {
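+    // For a target ID like "gfx908:xnack+" this block defines
+    // __amdgcn_processor__ as "gfx908", __amdgcn_target_id__ as
+    // "gfx908:xnack+", and __amdgcn_feature_xnack__ as 1; a feature spelled
+    // with '-' yields 0, and a feature the target ID does not mention gets no
+    // macro at all.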
+ Builder.defineMacro("__amdgcn_processor__",
+ Twine("\"") + Twine(CanonName) + Twine("\""));
+ Builder.defineMacro("__amdgcn_target_id__",
+ Twine("\"") + Twine(getTargetID().getValue()) +
+ Twine("\""));
+ for (auto F : getAllPossibleTargetIDFeatures(getTriple(), CanonName)) {
+ auto Loc = OffloadArchFeatures.find(F);
+ if (Loc != OffloadArchFeatures.end()) {
+ std::string NewF = F.str();
+ std::replace(NewF.begin(), NewF.end(), '-', '_');
+ Builder.defineMacro(Twine("__amdgcn_feature_") + Twine(NewF) +
+ Twine("__"),
+ Loc->second ? "1" : "0");
+ }
+ }
+ }
}
// TODO: __HAS_FMAF__, __HAS_LDEXPF__, __HAS_FP64__ are deprecated and will be
@@ -366,6 +396,8 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__HAS_FP64__");
if (hasFastFMA())
Builder.defineMacro("FP_FAST_FMA");
+
+ Builder.defineMacro("__AMDGCN_WAVEFRONT_SIZE", Twine(WavefrontSize));
}
void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
diff --git a/clang/lib/Basic/Targets/AMDGPU.h b/clang/lib/Basic/Targets/AMDGPU.h
index d0394492cad6..8ee0ca30d305 100644
--- a/clang/lib/Basic/Targets/AMDGPU.h
+++ b/clang/lib/Basic/Targets/AMDGPU.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
+#include "clang/Basic/TargetID.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/StringSet.h"
@@ -40,6 +41,15 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
llvm::AMDGPU::GPUKind GPUKind;
unsigned GPUFeatures;
+ unsigned WavefrontSize;
+
+  /// A target ID is a device name followed by optional feature names, each
+  /// postfixed by a plus or minus sign and delimited by colons, e.g.
+  /// gfx908:xnack+:sramecc-.
+ /// If the target ID contains feature+, map it to true.
+ /// If the target ID contains feature-, map it to false.
+ /// If the target ID does not contain a feature (default), do not map it.
+ llvm::StringMap<bool> OffloadArchFeatures;
+ std::string TargetID;
bool hasFP64() const {
return getTriple().getArch() == llvm::Triple::amdgcn ||
@@ -274,32 +284,32 @@ public:
void setSupportedOpenCLOpts() override {
auto &Opts = getSupportedOpenCLOpts();
- Opts.support("cl_clang_storage_class_specifiers");
- Opts.support("cl_khr_icd");
+ Opts["cl_clang_storage_class_specifiers"] = true;
+ Opts["__cl_clang_variadic_functions"] = true;
+ Opts["__cl_clang_function_pointers"] = true;
bool IsAMDGCN = isAMDGCN(getTriple());
- if (hasFP64())
- Opts.support("cl_khr_fp64");
+ Opts["cl_khr_fp64"] = hasFP64();
if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
- Opts.support("cl_khr_byte_addressable_store");
- Opts.support("cl_khr_global_int32_base_atomics");
- Opts.support("cl_khr_global_int32_extended_atomics");
- Opts.support("cl_khr_local_int32_base_atomics");
- Opts.support("cl_khr_local_int32_extended_atomics");
+ Opts["cl_khr_byte_addressable_store"] = true;
+ Opts["cl_khr_global_int32_base_atomics"] = true;
+ Opts["cl_khr_global_int32_extended_atomics"] = true;
+ Opts["cl_khr_local_int32_base_atomics"] = true;
+ Opts["cl_khr_local_int32_extended_atomics"] = true;
}
if (IsAMDGCN) {
- Opts.support("cl_khr_fp16");
- Opts.support("cl_khr_int64_base_atomics");
- Opts.support("cl_khr_int64_extended_atomics");
- Opts.support("cl_khr_mipmap_image");
- Opts.support("cl_khr_mipmap_image_writes");
- Opts.support("cl_khr_subgroups");
- Opts.support("cl_khr_3d_image_writes");
- Opts.support("cl_amd_media_ops");
- Opts.support("cl_amd_media_ops2");
+ Opts["cl_khr_fp16"] = true;
+ Opts["cl_khr_int64_base_atomics"] = true;
+ Opts["cl_khr_int64_extended_atomics"] = true;
+ Opts["cl_khr_mipmap_image"] = true;
+ Opts["cl_khr_mipmap_image_writes"] = true;
+ Opts["cl_khr_subgroups"] = true;
+ Opts["cl_khr_3d_image_writes"] = true;
+ Opts["cl_amd_media_ops"] = true;
+ Opts["cl_amd_media_ops2"] = true;
}
}
@@ -389,6 +399,37 @@ public:
void setAuxTarget(const TargetInfo *Aux) override;
bool hasExtIntType() const override { return true; }
+
+  // Record offload arch features since they are needed for defining the
+  // predefined macros.
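+  // For example, the features {"+wavefrontsize64", "+xnack"} set
+  // WavefrontSize to 64 and record xnack=true; feature names that are not
+  // valid target ID features for this GPU are ignored here.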
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ auto TargetIDFeatures =
+ getAllPossibleTargetIDFeatures(getTriple(), getArchNameAMDGCN(GPUKind));
+ llvm::for_each(Features, [&](const auto &F) {
+ assert(F.front() == '+' || F.front() == '-');
+ if (F == "+wavefrontsize64")
+ WavefrontSize = 64;
+ bool IsOn = F.front() == '+';
+ StringRef Name = StringRef(F).drop_front();
+ if (llvm::find(TargetIDFeatures, Name) == TargetIDFeatures.end())
+ return;
+ assert(OffloadArchFeatures.find(Name) == OffloadArchFeatures.end());
+ OffloadArchFeatures[Name] = IsOn;
+ });
+ return true;
+ }
+
+ Optional<std::string> getTargetID() const override {
+ if (!isAMDGCN(getTriple()))
+ return llvm::None;
+    // When -target-cpu is not set, we assume generic code that is valid for
+    // all GPUs and use an empty string as the target ID to represent that.
+ if (GPUKind == llvm::AMDGPU::GK_NONE)
+ return std::string("");
+ return getCanonicalTargetID(getArchNameAMDGCN(GPUKind),
+ OffloadArchFeatures);
+ }
};
} // namespace targets
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 21cfe0107bbb..a2c96ad12a76 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -208,6 +208,8 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "8_5A";
case llvm::ARM::ArchKind::ARMV8_6A:
return "8_6A";
+ case llvm::ARM::ArchKind::ARMV8_7A:
+ return "8_7A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
diff --git a/clang/lib/Basic/Targets/AVR.cpp b/clang/lib/Basic/Targets/AVR.cpp
index bb215b4114ac..664eea0de841 100644
--- a/clang/lib/Basic/Targets/AVR.cpp
+++ b/clang/lib/Basic/Targets/AVR.cpp
@@ -63,8 +63,10 @@ static MCUInfo AVRMcus[] = {
{"attiny85", "__AVR_ATtiny85__"},
{"attiny261", "__AVR_ATtiny261__"},
{"attiny261a", "__AVR_ATtiny261A__"},
+ {"attiny441", "__AVR_ATtiny441__"},
{"attiny461", "__AVR_ATtiny461__"},
{"attiny461a", "__AVR_ATtiny461A__"},
+ {"attiny841", "__AVR_ATtiny841__"},
{"attiny861", "__AVR_ATtiny861__"},
{"attiny861a", "__AVR_ATtiny861A__"},
{"attiny87", "__AVR_ATtiny87__"},
@@ -92,11 +94,13 @@ static MCUInfo AVRMcus[] = {
{"atmega48", "__AVR_ATmega48__"},
{"atmega48a", "__AVR_ATmega48A__"},
{"atmega48pa", "__AVR_ATmega48PA__"},
+ {"atmega48pb", "__AVR_ATmega48PB__"},
{"atmega48p", "__AVR_ATmega48P__"},
{"atmega88", "__AVR_ATmega88__"},
{"atmega88a", "__AVR_ATmega88A__"},
{"atmega88p", "__AVR_ATmega88P__"},
{"atmega88pa", "__AVR_ATmega88PA__"},
+ {"atmega88pb", "__AVR_ATmega88PB__"},
{"atmega8515", "__AVR_ATmega8515__"},
{"atmega8535", "__AVR_ATmega8535__"},
{"atmega8hva", "__AVR_ATmega8HVA__"},
@@ -124,6 +128,7 @@ static MCUInfo AVRMcus[] = {
{"atmega168a", "__AVR_ATmega168A__"},
{"atmega168p", "__AVR_ATmega168P__"},
{"atmega168pa", "__AVR_ATmega168PA__"},
+ {"atmega168pb", "__AVR_ATmega168PB__"},
{"atmega169", "__AVR_ATmega169__"},
{"atmega169a", "__AVR_ATmega169A__"},
{"atmega169p", "__AVR_ATmega169P__"},
@@ -134,6 +139,7 @@ static MCUInfo AVRMcus[] = {
{"atmega324a", "__AVR_ATmega324A__"},
{"atmega324p", "__AVR_ATmega324P__"},
{"atmega324pa", "__AVR_ATmega324PA__"},
+ {"atmega324pb", "__AVR_ATmega324PB__"},
{"atmega325", "__AVR_ATmega325__"},
{"atmega325a", "__AVR_ATmega325A__"},
{"atmega325p", "__AVR_ATmega325P__"},
@@ -144,6 +150,7 @@ static MCUInfo AVRMcus[] = {
{"atmega3250pa", "__AVR_ATmega3250PA__"},
{"atmega328", "__AVR_ATmega328__"},
{"atmega328p", "__AVR_ATmega328P__"},
+ {"atmega328pb", "__AVR_ATmega328PB__"},
{"atmega329", "__AVR_ATmega329__"},
{"atmega329a", "__AVR_ATmega329A__"},
{"atmega329p", "__AVR_ATmega329P__"},
diff --git a/clang/lib/Basic/Targets/Hexagon.cpp b/clang/lib/Basic/Targets/Hexagon.cpp
index 205601c359d0..a8b4380b6a87 100644
--- a/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/clang/lib/Basic/Targets/Hexagon.cpp
@@ -98,7 +98,8 @@ bool HexagonTargetInfo::initFeatureMap(
StringRef CPUFeature = CPU;
CPUFeature.consume_front("hexagon");
CPUFeature.consume_back("t");
- Features[CPUFeature] = true;
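+  // e.g. "hexagonv67t" yields the CPU feature "v67". If nothing remains (the
+  // CPU string was plain "hexagon"), do not add an empty feature name.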
+ if (!CPUFeature.empty())
+ Features[CPUFeature] = true;
Features["long-calls"] = false;
diff --git a/clang/lib/Basic/Targets/MSP430.cpp b/clang/lib/Basic/Targets/MSP430.cpp
index ef53ee352c32..90890500ae27 100644
--- a/clang/lib/Basic/Targets/MSP430.cpp
+++ b/clang/lib/Basic/Targets/MSP430.cpp
@@ -29,5 +29,6 @@ void MSP430TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("MSP430");
Builder.defineMacro("__MSP430__");
+ Builder.defineMacro("__ELF__");
// FIXME: defines for different 'flavours' of MCU
}
diff --git a/clang/lib/Basic/Targets/Mips.cpp b/clang/lib/Basic/Targets/Mips.cpp
index ead5e91f7c8f..3a32fd492c6b 100644
--- a/clang/lib/Basic/Targets/Mips.cpp
+++ b/clang/lib/Basic/Targets/Mips.cpp
@@ -41,7 +41,6 @@ bool MipsTargetInfo::processorSupportsGPR64() const {
.Case("octeon", true)
.Case("octeon+", true)
.Default(false);
- return false;
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index 18c3c8370331..b7f0dce33d2b 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -182,14 +182,17 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
switch (GPU) {
case CudaArch::GFX600:
case CudaArch::GFX601:
+ case CudaArch::GFX602:
case CudaArch::GFX700:
case CudaArch::GFX701:
case CudaArch::GFX702:
case CudaArch::GFX703:
case CudaArch::GFX704:
+ case CudaArch::GFX705:
case CudaArch::GFX801:
case CudaArch::GFX802:
case CudaArch::GFX803:
+ case CudaArch::GFX805:
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
@@ -197,12 +200,17 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX906:
case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX90c:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
case CudaArch::GFX1030:
+ case CudaArch::GFX1031:
+ case CudaArch::GFX1032:
+ case CudaArch::GFX1033:
case CudaArch::LAST:
break;
+ case CudaArch::UNUSED:
case CudaArch::UNKNOWN:
assert(false && "No GPU arch when compiling CUDA device code.");
return "";
diff --git a/clang/lib/Basic/Targets/NVPTX.h b/clang/lib/Basic/Targets/NVPTX.h
index f57a0f18efa3..038dec4a28bd 100644
--- a/clang/lib/Basic/Targets/NVPTX.h
+++ b/clang/lib/Basic/Targets/NVPTX.h
@@ -30,6 +30,8 @@ static const unsigned NVPTXAddrSpaceMap[] = {
0, // opencl_private
// FIXME: generic has to be added to the target
0, // opencl_generic
+ 1, // opencl_global_device
+ 1, // opencl_global_host
1, // cuda_device
4, // cuda_constant
3, // cuda_shared
@@ -125,16 +127,16 @@ public:
void setSupportedOpenCLOpts() override {
auto &Opts = getSupportedOpenCLOpts();
- Opts.support("cl_clang_storage_class_specifiers");
- Opts.support("cl_khr_gl_sharing");
- Opts.support("cl_khr_icd");
-
- Opts.support("cl_khr_fp64");
- Opts.support("cl_khr_byte_addressable_store");
- Opts.support("cl_khr_global_int32_base_atomics");
- Opts.support("cl_khr_global_int32_extended_atomics");
- Opts.support("cl_khr_local_int32_base_atomics");
- Opts.support("cl_khr_local_int32_extended_atomics");
+ Opts["cl_clang_storage_class_specifiers"] = true;
+ Opts["__cl_clang_function_pointers"] = true;
+ Opts["__cl_clang_variadic_functions"] = true;
+
+ Opts["cl_khr_fp64"] = true;
+ Opts["cl_khr_byte_addressable_store"] = true;
+ Opts["cl_khr_global_int32_base_atomics"] = true;
+ Opts["cl_khr_global_int32_extended_atomics"] = true;
+ Opts["cl_khr_local_int32_base_atomics"] = true;
+ Opts["cl_khr_local_int32_extended_atomics"] = true;
}
/// \returns If a target requires an address within a target specific address
diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
index cfa362bef1b1..67fa1a537fea 100644
--- a/clang/lib/Basic/Targets/OSTargets.h
+++ b/clang/lib/Basic/Targets/OSTargets.h
@@ -154,7 +154,8 @@ public:
MinVersion = llvm::VersionTuple(5U);
break;
default:
- llvm_unreachable("Unexpected OS");
+      // Conservatively return 64 bits (8 bytes) if the OS is unknown.
+ return 64;
}
unsigned Major, Minor, Micro;
@@ -252,6 +253,7 @@ public:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
this->MCountName = "_mcount";
@@ -382,8 +384,12 @@ protected:
Triple.getEnvironmentVersion(Maj, Min, Rev);
this->PlatformName = "android";
this->PlatformMinVersion = VersionTuple(Maj, Min, Rev);
- if (Maj)
- Builder.defineMacro("__ANDROID_API__", Twine(Maj));
+ if (Maj) {
+ Builder.defineMacro("__ANDROID_MIN_SDK_VERSION__", Twine(Maj));
+      // This is the historical but ambiguous name for the minSdkVersion
+      // macro. Keep it defined for compatibility.
+ Builder.defineMacro("__ANDROID_API__", "__ANDROID_MIN_SDK_VERSION__");
+ }
} else {
Builder.defineMacro("__gnu_linux__");
}
@@ -408,6 +414,7 @@ public:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
this->MCountName = "_mcount";
@@ -465,6 +472,9 @@ protected:
public:
OpenBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
+ this->WCharType = this->WIntType = this->SignedInt;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
switch (Triple.getArch()) {
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -476,6 +486,8 @@ public:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
case llvm::Triple::sparcv9:
this->MCountName = "_mcount";
break;
@@ -668,6 +680,9 @@ protected:
Builder.defineMacro("_AIX");
+ if (Opts.EnableAIXExtendedAltivecABI)
+ Builder.defineMacro("__EXTABI__");
+
unsigned Major, Minor, Micro;
Triple.getOSVersion(Major, Minor, Micro);
@@ -719,6 +734,66 @@ public:
// AIX sets FLT_EVAL_METHOD to be 1.
unsigned getFloatEvalMethod() const override { return 1; }
bool hasInt128Type() const override { return false; }
+
+ bool defaultsToAIXPowerAlignment() const override { return true; }
+};
+
+// z/OS target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY ZOSTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // FIXME: _LONG_LONG should not be defined under -std=c89.
+ Builder.defineMacro("_LONG_LONG");
+ Builder.defineMacro("_OPEN_DEFAULT");
+ // _UNIX03_WITHDRAWN is required to build libcxx.
+ Builder.defineMacro("_UNIX03_WITHDRAWN");
+ Builder.defineMacro("__370__");
+ Builder.defineMacro("__BFP__");
+ // FIXME: __BOOL__ should not be defined under -std=c89.
+ Builder.defineMacro("__BOOL__");
+ Builder.defineMacro("__LONGNAME__");
+ Builder.defineMacro("__MVS__");
+ Builder.defineMacro("__THW_370__");
+ Builder.defineMacro("__THW_BIG_ENDIAN__");
+ Builder.defineMacro("__TOS_390__");
+ Builder.defineMacro("__TOS_MVS__");
+ Builder.defineMacro("__XPLINK__");
+
+ if (this->PointerWidth == 64)
+ Builder.defineMacro("__64BIT__");
+
+ if (Opts.CPlusPlus) {
+ Builder.defineMacro("__DLL__");
+ // _XOPEN_SOURCE=600 is required to build libcxx.
+ Builder.defineMacro("_XOPEN_SOURCE", "600");
+ }
+
+ if (Opts.GNUMode) {
+ Builder.defineMacro("_MI_BUILTIN");
+ Builder.defineMacro("_EXT");
+ }
+
+ if (Opts.CPlusPlus && Opts.WChar) {
+ // Macro __wchar_t is defined so that the wchar_t data
+ // type is not declared as a typedef in system headers.
+ Builder.defineMacro("__wchar_t");
+ }
+
+ this->PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
+ }
+
+public:
+ ZOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {
+ this->WCharType = TargetInfo::UnsignedInt;
+ this->UseBitFieldTypeAlignment = false;
+ this->UseZeroLengthBitfieldAlignment = true;
+ this->ZeroLengthBitfieldBoundary = 32;
+ this->MinGlobalAlign = 0;
+ this->DefaultAlignForAttributeAligned = 128;
+ }
};
void addWindowsDefines(const llvm::Triple &Triple, const LangOptions &Opts,
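For context on the Android hunk above: after this change user code sees both macros, and __ANDROID_API__ expands to __ANDROID_MIN_SDK_VERSION__ rather than to a literal. A minimal C sketch of a downstream feature check (the version value 21 is only an example):

    #if defined(__ANDROID_MIN_SDK_VERSION__) && __ANDROID_MIN_SDK_VERSION__ >= 21
    /* May assume APIs introduced in android-21; __ANDROID_API__ still works
       here because it now expands to __ANDROID_MIN_SDK_VERSION__. */
    #endif
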
diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp
index f0de2bf070ea..cfede6e6e756 100644
--- a/clang/lib/Basic/Targets/PPC.cpp
+++ b/clang/lib/Basic/Targets/PPC.cpp
@@ -46,8 +46,6 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasP8Crypto = true;
} else if (Feature == "+direct-move") {
HasDirectMove = true;
- } else if (Feature == "+qpx") {
- HasQPX = true;
} else if (Feature == "+htm") {
HasHTM = true;
} else if (Feature == "+float128") {
@@ -58,12 +56,16 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasP10Vector = true;
} else if (Feature == "+pcrelative-memops") {
HasPCRelativeMemops = true;
- } else if (Feature == "+spe") {
+ } else if (Feature == "+spe" || Feature == "+efpu2") {
HasSPE = true;
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
} else if (Feature == "-hard-float") {
FloatABI = SoftFloat;
+ } else if (Feature == "+paired-vector-memops") {
+ PairedVectorMemops = true;
+ } else if (Feature == "+mma") {
+ HasMMA = true;
}
// TODO: Finish this list and add an assert that we've handled them
// all.
@@ -90,7 +92,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
}
// Target properties.
- if (getTriple().getArch() == llvm::Triple::ppc64le) {
+ if (getTriple().getArch() == llvm::Triple::ppc64le ||
+ getTriple().getArch() == llvm::Triple::ppcle) {
Builder.defineMacro("_LITTLE_ENDIAN");
} else {
if (!getTriple().isOSNetBSD() &&
@@ -99,7 +102,7 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
}
// ABI options.
- if (ABI == "elfv1" || ABI == "elfv1-qpx")
+ if (ABI == "elfv1")
Builder.defineMacro("_CALL_ELF", "1");
if (ABI == "elfv2")
Builder.defineMacro("_CALL_ELF", "2");
@@ -120,6 +123,10 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (LongDoubleWidth == 128) {
Builder.defineMacro("__LONG_DOUBLE_128__");
Builder.defineMacro("__LONGDOUBLE128");
+ if (Opts.PPCIEEELongDouble)
+ Builder.defineMacro("__LONG_DOUBLE_IEEE128__");
+ else
+ Builder.defineMacro("__LONG_DOUBLE_IBM128__");
}
// Define this for elfv2 (64-bit only) or 64-bit darwin.
@@ -159,22 +166,11 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_ARCH_PWR10");
if (ArchDefs & ArchDefineA2)
Builder.defineMacro("_ARCH_A2");
- if (ArchDefs & ArchDefineA2q) {
- Builder.defineMacro("_ARCH_A2Q");
- Builder.defineMacro("_ARCH_QP");
- }
if (ArchDefs & ArchDefineE500)
Builder.defineMacro("__NO_LWSYNC__");
if (ArchDefs & ArchDefineFuture)
Builder.defineMacro("_ARCH_PWR_FUTURE");
- if (getTriple().getVendor() == llvm::Triple::BGQ) {
- Builder.defineMacro("__bg__");
- Builder.defineMacro("__THW_BLUEGENE__");
- Builder.defineMacro("__bgq__");
- Builder.defineMacro("__TOS_BGQ__");
- }
-
if (HasAltivec) {
Builder.defineMacro("__VEC__", "10206");
Builder.defineMacro("__ALTIVEC__");
@@ -195,6 +191,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__FLOAT128__");
if (HasP9Vector)
Builder.defineMacro("__POWER9_VECTOR__");
+ if (HasMMA)
+ Builder.defineMacro("__MMA__");
if (HasP10Vector)
Builder.defineMacro("__POWER10_VECTOR__");
@@ -231,6 +229,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// - direct-move
// - float128
// - power9-vector
+// - paired-vector-memops
+// - mma
// - power10-vector
// then go ahead and error since the customer has expressed an incompatible
// set of options.
@@ -253,6 +253,8 @@ static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
Found |= FindVSXSubfeature("+direct-move", "-mdirect-move");
Found |= FindVSXSubfeature("+float128", "-mfloat128");
Found |= FindVSXSubfeature("+power9-vector", "-mpower9-vector");
+ Found |= FindVSXSubfeature("+paired-vector-memops", "-mpaired-vector-memops");
+ Found |= FindVSXSubfeature("+mma", "-mmma");
Found |= FindVSXSubfeature("+power10-vector", "-mpower10-vector");
// Return false if any VSX subfeature was found.
@@ -277,7 +279,6 @@ bool PPCTargetInfo::initFeatureMap(
.Case("ppc64le", true)
.Default(false);
- Features["qpx"] = (CPU == "a2q");
Features["power9-vector"] = (CPU == "pwr9");
Features["crypto"] = llvm::StringSwitch<bool>(CPU)
.Case("ppc64le", true)
@@ -317,6 +318,9 @@ bool PPCTargetInfo::initFeatureMap(
.Case("pwr9", true)
.Case("pwr8", true)
.Default(false);
+ Features["float128"] = llvm::StringSwitch<bool>(CPU)
+ .Case("pwr9", true)
+ .Default(false);
Features["spe"] = llvm::StringSwitch<bool>(CPU)
.Case("8548", true)
@@ -347,6 +351,13 @@ bool PPCTargetInfo::initFeatureMap(
return false;
}
+ if (!(ArchDefs & ArchDefinePwr10) &&
+ llvm::find(FeaturesVec, "+mma") != FeaturesVec.end()) {
+ // MMA requires Power10 or later; reject -mmma for earlier CPUs.
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mmma" << CPU;
+ return false;
+ }
+
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -354,6 +365,8 @@ bool PPCTargetInfo::initFeatureMap(
void PPCTargetInfo::addP10SpecificFeatures(
llvm::StringMap<bool> &Features) const {
Features["htm"] = false; // HTM was removed for P10.
+ Features["paired-vector-memops"] = true;
+ Features["mma"] = true;
Features["power10-vector"] = true;
Features["pcrelative-memops"] = true;
return;
@@ -373,21 +386,24 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("power8-vector", HasP8Vector)
.Case("crypto", HasP8Crypto)
.Case("direct-move", HasDirectMove)
- .Case("qpx", HasQPX)
.Case("htm", HasHTM)
.Case("bpermd", HasBPERMD)
.Case("extdiv", HasExtDiv)
.Case("float128", HasFloat128)
.Case("power9-vector", HasP9Vector)
+ .Case("paired-vector-memops", PairedVectorMemops)
.Case("power10-vector", HasP10Vector)
.Case("pcrelative-memops", HasPCRelativeMemops)
.Case("spe", HasSPE)
+ .Case("mma", HasMMA)
.Default(false);
}
void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name, bool Enabled) const {
if (Enabled) {
+ if (Name == "efpu2")
+ Features["spe"] = true;
// If we're enabling any of the vsx based features then enable vsx and
// altivec. We'll diagnose any problems later.
bool FeatureHasVSX = llvm::StringSwitch<bool>(Name)
@@ -395,8 +411,10 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
.Case("direct-move", true)
.Case("power8-vector", true)
.Case("power9-vector", true)
+ .Case("paired-vector-memops", true)
.Case("power10-vector", true)
.Case("float128", true)
+ .Case("mma", true)
.Default(false);
if (FeatureHasVSX)
Features["vsx"] = Features["altivec"] = true;
@@ -409,16 +427,21 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
else
Features[Name] = true;
} else {
+ if (Name == "spe")
+ Features["efpu2"] = false;
// If we're disabling altivec or vsx go ahead and disable all of the vsx
// features.
if ((Name == "altivec") || (Name == "vsx"))
Features["vsx"] = Features["direct-move"] = Features["power8-vector"] =
Features["float128"] = Features["power9-vector"] =
- Features["power10-vector"] = false;
+ Features["paired-vector-memops"] = Features["mma"] =
+ Features["power10-vector"] = false;
if (Name == "power8-vector")
- Features["power9-vector"] = Features["power10-vector"] = false;
+ Features["power9-vector"] = Features["paired-vector-memops"] =
+ Features["mma"] = Features["power10-vector"] = false;
else if (Name == "power9-vector")
- Features["power10-vector"] = false;
+ Features["paired-vector-memops"] = Features["mma"] =
+ Features["power10-vector"] = false;
if (Name == "pcrel")
Features["pcrelative-memops"] = false;
else
@@ -503,17 +526,17 @@ ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
- {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
- {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
- {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
- {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
- {"g5"}, {"a2"}, {"a2q"}, {"e500"}, {"e500mc"},
- {"e5500"}, {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"},
- {"power5"}, {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"},
- {"pwr6"}, {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"},
- {"power8"}, {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"},
- {"pwr10"}, {"powerpc"}, {"ppc"}, {"powerpc64"}, {"ppc64"},
- {"powerpc64le"}, {"ppc64le"}, {"future"}};
+ {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
+ {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
+ {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
+ {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
+ {"g5"}, {"a2"}, {"e500"}, {"e500mc"}, {"e5500"},
+ {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"}, {"power5"},
+ {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"}, {"pwr6"},
+ {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"}, {"power8"},
+ {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"}, {"pwr10"},
+ {"powerpc"}, {"ppc"}, {"powerpc64"}, {"ppc64"}, {"powerpc64le"},
+ {"ppc64le"}, {"future"}};
bool PPCTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
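The PPC.cpp hunks above add the mma and paired-vector-memops features along with the __MMA__ macro. A minimal sketch of how source code would guard MMA usage on that macro (the builtin named in the comment is one of the Power10 MMA builtins, mentioned purely as an illustration):

    #ifdef __MMA__
    /* Power10 MMA is available; accumulator builtins such as
       __builtin_mma_xxsetaccz() can be used in this block. */
    #endif
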
diff --git a/clang/lib/Basic/Targets/PPC.h b/clang/lib/Basic/Targets/PPC.h
index 858059bacb86..56c8f33ef221 100644
--- a/clang/lib/Basic/Targets/PPC.h
+++ b/clang/lib/Basic/Targets/PPC.h
@@ -46,7 +46,6 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
ArchDefinePwr10 = 1 << 14,
ArchDefineFuture = 1 << 15,
ArchDefineA2 = 1 << 16,
- ArchDefineA2q = 1 << 17,
ArchDefineE500 = 1 << 18
} ArchDefineTypes;
@@ -59,16 +58,17 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
// Target cpu features.
bool HasAltivec = false;
+ bool HasMMA = false;
bool HasVSX = false;
bool HasP8Vector = false;
bool HasP8Crypto = false;
bool HasDirectMove = false;
- bool HasQPX = false;
bool HasHTM = false;
bool HasBPERMD = false;
bool HasExtDiv = false;
bool HasP9Vector = false;
bool HasSPE = false;
+ bool PairedVectorMemops = false;
bool HasP10Vector = false;
bool HasPCRelativeMemops = false;
@@ -82,6 +82,7 @@ public:
SimdDefaultAlign = 128;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
+ HasStrictFP = true;
}
// Set the language option for altivec based on our value.
@@ -118,7 +119,6 @@ public:
.Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
ArchDefinePpcsq)
.Case("a2", ArchDefineA2)
- .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
.Cases("power3", "pwr3", ArchDefinePpcgr)
.Cases("power4", "pwr4",
ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
@@ -355,6 +355,8 @@ public:
: PPCTargetInfo(Triple, Opts) {
if (Triple.isOSAIX())
resetDataLayout("E-m:a-p:32:32-i64:64-n32");
+ else if (Triple.getArch() == llvm::Triple::ppcle)
+ resetDataLayout("e-m:e-p:32:32-i64:64-n32");
else
resetDataLayout("E-m:e-p:32:32-i64:64-n32");
@@ -370,14 +372,16 @@ public:
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntPtrType = SignedLong;
- SuitableAlign = 64;
+ LongDoubleWidth = 64;
+ LongDoubleAlign = DoubleAlign = 32;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
break;
default:
break;
}
if (Triple.isOSFreeBSD() || Triple.isOSNetBSD() || Triple.isOSOpenBSD() ||
- Triple.getOS() == llvm::Triple::AIX || Triple.isMusl()) {
+ Triple.isMusl()) {
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
@@ -401,25 +405,31 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = SignedLong;
Int64Type = SignedLong;
+ std::string DataLayout;
if (Triple.isOSAIX()) {
// TODO: Set appropriate ABI for AIX platform.
- resetDataLayout("E-m:a-i64:64-n32:64");
- SuitableAlign = 64;
+ DataLayout = "E-m:a-i64:64-n32:64";
+ LongDoubleWidth = 64;
+ LongDoubleAlign = DoubleAlign = 32;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
} else if ((Triple.getArch() == llvm::Triple::ppc64le)) {
- resetDataLayout("e-m:e-i64:64-n32:64");
+ DataLayout = "e-m:e-i64:64-n32:64";
ABI = "elfv2";
} else {
- resetDataLayout("E-m:e-i64:64-n32:64");
+ DataLayout = "E-m:e-i64:64-n32:64";
ABI = "elfv1";
}
- if (Triple.isOSFreeBSD() || Triple.getOS() == llvm::Triple::AIX ||
- Triple.isMusl()) {
+ if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() || Triple.isMusl()) {
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
+ if (Triple.isOSAIX() || Triple.isOSLinux())
+ DataLayout += "-v256:256:256-v512:512:512";
+ resetDataLayout(DataLayout);
+
// PPC64 supports atomics up to 8 bytes.
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
@@ -430,7 +440,7 @@ public:
// PPC64 Linux-specific ABI options.
bool setABI(const std::string &Name) override {
- if (Name == "elfv1" || Name == "elfv1-qpx" || Name == "elfv2") {
+ if (Name == "elfv1" || Name == "elfv2") {
ABI = Name;
return true;
}
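Related to the __LONG_DOUBLE_IEEE128__/__LONG_DOUBLE_IBM128__ macros added in the PPC.cpp hunk above, here is a minimal C sketch distinguishing the two 128-bit long double ABIs (the -mabi spelling in the comment is the usual driver flag and is given only for orientation):

    #if defined(__LONG_DOUBLE_IEEE128__)
    /* long double is IEEE binary128 (e.g. -mabi=ieeelongdouble). */
    #elif defined(__LONG_DOUBLE_IBM128__)
    /* long double is the IBM double-double format. */
    #endif
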
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 522776437cd2..0bf02e605740 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -13,6 +13,7 @@
#include "RISCV.h"
#include "clang/Basic/MacroBuilder.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/TargetParser.h"
using namespace clang;
using namespace clang::targets;
@@ -108,26 +109,92 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
if (ABIName == "ilp32e")
Builder.defineMacro("__riscv_abi_rve");
+ Builder.defineMacro("__riscv_arch_test");
+ Builder.defineMacro("__riscv_i", "2000000");
+
if (HasM) {
+ Builder.defineMacro("__riscv_m", "2000000");
Builder.defineMacro("__riscv_mul");
Builder.defineMacro("__riscv_div");
Builder.defineMacro("__riscv_muldiv");
}
- if (HasA)
+ if (HasA) {
+ Builder.defineMacro("__riscv_a", "2000000");
Builder.defineMacro("__riscv_atomic");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ if (Is64Bit)
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ }
if (HasF || HasD) {
+ Builder.defineMacro("__riscv_f", "2000000");
Builder.defineMacro("__riscv_flen", HasD ? "64" : "32");
Builder.defineMacro("__riscv_fdiv");
Builder.defineMacro("__riscv_fsqrt");
}
- if (HasC)
+ if (HasD)
+ Builder.defineMacro("__riscv_d", "2000000");
+
+ if (HasC) {
+ Builder.defineMacro("__riscv_c", "2000000");
Builder.defineMacro("__riscv_compressed");
+ }
- if (HasB)
+ if (HasB) {
+ Builder.defineMacro("__riscv_b", "93000");
Builder.defineMacro("__riscv_bitmanip");
+ }
+
+ if (HasV) {
+ Builder.defineMacro("__riscv_v", "1000000");
+ Builder.defineMacro("__riscv_vector");
+ }
+
+ if (HasZba)
+ Builder.defineMacro("__riscv_zba", "93000");
+
+ if (HasZbb)
+ Builder.defineMacro("__riscv_zbb", "93000");
+
+ if (HasZbc)
+ Builder.defineMacro("__riscv_zbc", "93000");
+
+ if (HasZbe)
+ Builder.defineMacro("__riscv_zbe", "93000");
+
+ if (HasZbf)
+ Builder.defineMacro("__riscv_zbf", "93000");
+
+ if (HasZbm)
+ Builder.defineMacro("__riscv_zbm", "93000");
+
+ if (HasZbp)
+ Builder.defineMacro("__riscv_zbp", "93000");
+
+ if (HasZbproposedc)
+ Builder.defineMacro("__riscv_zbproposedc", "93000");
+
+ if (HasZbr)
+ Builder.defineMacro("__riscv_zbr", "93000");
+
+ if (HasZbs)
+ Builder.defineMacro("__riscv_zbs", "93000");
+
+ if (HasZbt)
+ Builder.defineMacro("__riscv_zbt", "93000");
+
+ if (HasZfh)
+ Builder.defineMacro("__riscv_zfh", "1000");
+
+ if (HasZvamo)
+ Builder.defineMacro("__riscv_zvamo", "1000000");
+
+ if (HasZvlsseg)
+ Builder.defineMacro("__riscv_zvlsseg", "1000000");
}
/// Return true if has this feature, need to sync with handleTargetFeatures.
@@ -143,6 +210,21 @@ bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
.Case("d", HasD)
.Case("c", HasC)
.Case("experimental-b", HasB)
+ .Case("experimental-v", HasV)
+ .Case("experimental-zba", HasZba)
+ .Case("experimental-zbb", HasZbb)
+ .Case("experimental-zbc", HasZbc)
+ .Case("experimental-zbe", HasZbe)
+ .Case("experimental-zbf", HasZbf)
+ .Case("experimental-zbm", HasZbm)
+ .Case("experimental-zbp", HasZbp)
+ .Case("experimental-zbproposedc", HasZbproposedc)
+ .Case("experimental-zbr", HasZbr)
+ .Case("experimental-zbs", HasZbs)
+ .Case("experimental-zbt", HasZbt)
+ .Case("experimental-zfh", HasZfh)
+ .Case("experimental-zvamo", HasZvamo)
+ .Case("experimental-zvlsseg", HasZvlsseg)
.Default(false);
}
@@ -162,7 +244,79 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasC = true;
else if (Feature == "+experimental-b")
HasB = true;
+ else if (Feature == "+experimental-v")
+ HasV = true;
+ else if (Feature == "+experimental-zba")
+ HasZba = true;
+ else if (Feature == "+experimental-zbb")
+ HasZbb = true;
+ else if (Feature == "+experimental-zbc")
+ HasZbc = true;
+ else if (Feature == "+experimental-zbe")
+ HasZbe = true;
+ else if (Feature == "+experimental-zbf")
+ HasZbf = true;
+ else if (Feature == "+experimental-zbm")
+ HasZbm = true;
+ else if (Feature == "+experimental-zbp")
+ HasZbp = true;
+ else if (Feature == "+experimental-zbproposedc")
+ HasZbproposedc = true;
+ else if (Feature == "+experimental-zbr")
+ HasZbr = true;
+ else if (Feature == "+experimental-zbs")
+ HasZbs = true;
+ else if (Feature == "+experimental-zbt")
+ HasZbt = true;
+ else if (Feature == "+experimental-zfh")
+ HasZfh = true;
+ else if (Feature == "+experimental-zvamo")
+ HasZvamo = true;
+ else if (Feature == "+experimental-zvlsseg")
+ HasZvlsseg = true;
}
return true;
}
+
+bool RISCV32TargetInfo::isValidCPUName(StringRef Name) const {
+ return llvm::RISCV::checkCPUKind(llvm::RISCV::parseCPUKind(Name),
+ /*Is64Bit=*/false);
+}
+
+void RISCV32TargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ llvm::RISCV::fillValidCPUArchList(Values, false);
+}
+
+bool RISCV32TargetInfo::isValidTuneCPUName(StringRef Name) const {
+ return llvm::RISCV::checkTuneCPUKind(
+ llvm::RISCV::parseTuneCPUKind(Name, false),
+ /*Is64Bit=*/false);
+}
+
+void RISCV32TargetInfo::fillValidTuneCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ llvm::RISCV::fillValidTuneCPUArchList(Values, false);
+}
+
+bool RISCV64TargetInfo::isValidCPUName(StringRef Name) const {
+ return llvm::RISCV::checkCPUKind(llvm::RISCV::parseCPUKind(Name),
+ /*Is64Bit=*/true);
+}
+
+void RISCV64TargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ llvm::RISCV::fillValidCPUArchList(Values, true);
+}
+
+bool RISCV64TargetInfo::isValidTuneCPUName(StringRef Name) const {
+ return llvm::RISCV::checkTuneCPUKind(
+ llvm::RISCV::parseTuneCPUKind(Name, true),
+ /*Is64Bit=*/true);
+}
+
+void RISCV64TargetInfo::fillValidTuneCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ llvm::RISCV::fillValidTuneCPUArchList(Values, true);
+}
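The RISC-V hunks above implement the extension test macros: each __riscv_<ext> macro encodes the extension version as major * 1000000 + minor * 1000, so "2000000" means v2.0 and "93000" means v0.93. A minimal C sketch of a consumer (purely illustrative):

    #if defined(__riscv_arch_test) && defined(__riscv_a) && __riscv_a >= 2000000
    /* The A extension (v2.0 or newer) is present; the
       __GCC_HAVE_SYNC_COMPARE_AND_SWAP_* macros defined above are also
       available for lock-free atomics. */
    #endif
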
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index 73652b409e9c..f1e9215b2d17 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -24,18 +24,32 @@ namespace targets {
// RISC-V Target
class RISCVTargetInfo : public TargetInfo {
protected:
- std::string ABI;
- bool HasM;
- bool HasA;
- bool HasF;
- bool HasD;
- bool HasC;
- bool HasB;
+ std::string ABI, CPU;
+ bool HasM = false;
+ bool HasA = false;
+ bool HasF = false;
+ bool HasD = false;
+ bool HasC = false;
+ bool HasB = false;
+ bool HasV = false;
+ bool HasZba = false;
+ bool HasZbb = false;
+ bool HasZbc = false;
+ bool HasZbe = false;
+ bool HasZbf = false;
+ bool HasZbm = false;
+ bool HasZbp = false;
+ bool HasZbproposedc = false;
+ bool HasZbr = false;
+ bool HasZbs = false;
+ bool HasZbt = false;
+ bool HasZfh = false;
+ bool HasZvamo = false;
+ bool HasZvlsseg = false;
public:
RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
- : TargetInfo(Triple), HasM(false), HasA(false), HasF(false),
- HasD(false), HasC(false), HasB(false) {
+ : TargetInfo(Triple) {
LongDoubleWidth = 128;
LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -44,6 +58,13 @@ public:
WIntType = UnsignedInt;
}
+ bool setCPU(const std::string &Name) override {
+ if (!isValidCPUName(Name))
+ return false;
+ CPU = Name;
+ return true;
+ }
+
StringRef getABI() const override { return ABI; }
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -97,6 +118,11 @@ public:
return false;
}
+ bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+ bool isValidTuneCPUName(StringRef Name) const override;
+ void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
@@ -121,6 +147,11 @@ public:
return false;
}
+ bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+ bool isValidTuneCPUName(StringRef Name) const override;
+ void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
diff --git a/clang/lib/Basic/Targets/SPIR.h b/clang/lib/Basic/Targets/SPIR.h
index f625d4980e29..a2e812624d37 100644
--- a/clang/lib/Basic/Targets/SPIR.h
+++ b/clang/lib/Basic/Targets/SPIR.h
@@ -28,6 +28,8 @@ static const unsigned SPIRAddrSpaceMap[] = {
2, // opencl_constant
0, // opencl_private
4, // opencl_generic
+ 5, // opencl_global_device
+ 6, // opencl_global_host
0, // cuda_device
0, // cuda_constant
0, // cuda_shared
@@ -98,10 +100,12 @@ public:
void setSupportedOpenCLOpts() override {
// Assume all OpenCL extensions and optional core features are supported
// for SPIR since it is a generic target.
- getSupportedOpenCLOpts().supportAll();
+ supportAllOpenCLOpts();
}
bool hasExtIntType() const override { return true; }
+
+ bool hasInt128Type() const override { return false; }
};
class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
public:
diff --git a/clang/lib/Basic/Targets/Sparc.cpp b/clang/lib/Basic/Targets/Sparc.cpp
index 13aa964d4716..5eeb77406c34 100644
--- a/clang/lib/Basic/Targets/Sparc.cpp
+++ b/clang/lib/Basic/Targets/Sparc.cpp
@@ -147,19 +147,20 @@ void SparcTargetInfo::getTargetDefines(const LangOptions &Opts,
void SparcV8TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
SparcTargetInfo::getTargetDefines(Opts, Builder);
- switch (getCPUGeneration(CPU)) {
- case CG_V8:
+ if (getTriple().getOS() == llvm::Triple::Solaris)
Builder.defineMacro("__sparcv8");
- if (getTriple().getOS() != llvm::Triple::Solaris)
+ else {
+ switch (getCPUGeneration(CPU)) {
+ case CG_V8:
+ Builder.defineMacro("__sparcv8");
Builder.defineMacro("__sparcv8__");
- break;
- case CG_V9:
- Builder.defineMacro("__sparcv9");
- if (getTriple().getOS() != llvm::Triple::Solaris) {
+ break;
+ case CG_V9:
+ Builder.defineMacro("__sparcv9");
Builder.defineMacro("__sparcv9__");
Builder.defineMacro("__sparc_v9__");
+ break;
}
- break;
}
if (getTriple().getVendor() == llvm::Triple::Myriad) {
std::string MyriadArchValue, Myriad2Value;
@@ -227,6 +228,12 @@ void SparcV8TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__myriad2__", Myriad2Value);
Builder.defineMacro("__myriad2", Myriad2Value);
}
+ if (getCPUGeneration(CPU) == CG_V9) {
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ }
}
void SparcV9TargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -240,6 +247,11 @@ void SparcV9TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__sparc_v9__");
Builder.defineMacro("__sparcv9__");
}
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
void SparcV9TargetInfo::fillValidCPUList(
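With the __GCC_HAVE_SYNC_COMPARE_AND_SWAP_* macros now defined for V9 in the hunks above, portable code can select a lock-free path on SPARC. A minimal C sketch:

    #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
    static long counter;
    static void bump(void) {
      /* Lock-free on V9; the __sync_* builtins lower to compare-and-swap. */
      __sync_fetch_and_add(&counter, 1L);
    }
    #endif
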
diff --git a/clang/lib/Basic/Targets/Sparc.h b/clang/lib/Basic/Targets/Sparc.h
index d24cf15d7cd6..07844abafe11 100644
--- a/clang/lib/Basic/Targets/Sparc.h
+++ b/clang/lib/Basic/Targets/Sparc.h
@@ -166,10 +166,15 @@ public:
PtrDiffType = SignedLong;
break;
}
- // Up to 32 bits are lock-free atomic, but we're willing to do atomic ops
- // on up to 64 bits.
+ // Up to 32 bits (V8) or 64 bits (V9) are lock-free atomic, but we're
+ // willing to do atomic ops on up to 64 bits.
MaxAtomicPromoteWidth = 64;
- MaxAtomicInlineWidth = 32;
+ if (getCPUGeneration(CPU) == CG_V9)
+ MaxAtomicInlineWidth = 64;
+ else
+ // FIXME: This isn't correct for plain V8, which lacks CAS; it only
+ // holds for LEON 3+ and Myriad.
+ MaxAtomicInlineWidth = 32;
}
void getTargetDefines(const LangOptions &Opts,
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index d7869e3754a8..39fdcf90d0c8 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -157,6 +157,10 @@ public:
const char *getLongDoubleMangling() const override { return "g"; }
bool hasExtIntType() const override { return true; }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ return RegNo < 4 ? 6 + RegNo : -1;
+ }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/TCE.h b/clang/lib/Basic/Targets/TCE.h
index 9cbf2a3688a2..445fe4fe7293 100644
--- a/clang/lib/Basic/Targets/TCE.h
+++ b/clang/lib/Basic/Targets/TCE.h
@@ -35,6 +35,8 @@ static const unsigned TCEOpenCLAddrSpaceMap[] = {
4, // opencl_local
5, // opencl_constant
0, // opencl_private
+ 1, // opencl_global_device
+ 1, // opencl_global_host
// FIXME: generic has to be added to the target
0, // opencl_generic
0, // cuda_device
diff --git a/clang/lib/Basic/Targets/VE.h b/clang/lib/Basic/Targets/VE.h
index f863a0af0acb..71d6fc08d859 100644
--- a/clang/lib/Basic/Targets/VE.h
+++ b/clang/lib/Basic/Targets/VE.h
@@ -45,16 +45,15 @@ public:
WCharType = UnsignedInt;
WIntType = UnsignedInt;
UseZeroLengthBitfieldAlignment = true;
- resetDataLayout("e-m:e-i64:64-n32:64-S128");
+ resetDataLayout(
+ "e-m:e-i64:64-n32:64-S128-v64:64:64-v128:64:64-v256:64:64-v512:64:64-"
+ "v1024:64:64-v2048:64:64-v4096:64:64-v8192:64:64-v16384:64:64");
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- bool hasSjLjLowering() const override {
- // TODO
- return false;
- }
+ bool hasSjLjLowering() const override { return true; }
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
@@ -160,6 +159,13 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default:
+ return false;
+ case 'v':
+ Info.setAllowsRegister();
+ return true;
+ }
return false;
}
diff --git a/clang/lib/Basic/Targets/WebAssembly.cpp b/clang/lib/Basic/Targets/WebAssembly.cpp
index 6746768090f5..dcb3d8fd7790 100644
--- a/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -96,19 +96,43 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
}
void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
- SIMDEnum Level) {
+ SIMDEnum Level, bool Enabled) {
+ if (Enabled) {
+ switch (Level) {
+ case UnimplementedSIMD128:
+ Features["unimplemented-simd128"] = true;
+ LLVM_FALLTHROUGH;
+ case SIMD128:
+ Features["simd128"] = true;
+ LLVM_FALLTHROUGH;
+ case NoSIMD:
+ break;
+ }
+ return;
+ }
+
switch (Level) {
- case UnimplementedSIMD128:
- Features["unimplemented-simd128"] = true;
- LLVM_FALLTHROUGH;
+ case NoSIMD:
case SIMD128:
- Features["simd128"] = true;
+ Features["simd128"] = false;
LLVM_FALLTHROUGH;
- case NoSIMD:
+ case UnimplementedSIMD128:
+ Features["unimplemented-simd128"] = false;
break;
}
}
+void WebAssemblyTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ if (Name == "simd128")
+ setSIMDLevel(Features, SIMD128, Enabled);
+ else if (Name == "unimplemented-simd128")
+ setSIMDLevel(Features, UnimplementedSIMD128, Enabled);
+ else
+ Features[Name] = Enabled;
+}
+
bool WebAssemblyTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
@@ -119,30 +143,8 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["atomics"] = true;
Features["mutable-globals"] = true;
Features["tail-call"] = true;
- setSIMDLevel(Features, SIMD128);
+ setSIMDLevel(Features, SIMD128, true);
}
- // Other targets do not consider user-configured features here, but while we
- // are actively developing new features it is useful to let user-configured
- // features control availability of builtins
- setSIMDLevel(Features, SIMDLevel);
- if (HasNontrappingFPToInt)
- Features["nontrapping-fptoint"] = true;
- if (HasSignExt)
- Features["sign-ext"] = true;
- if (HasExceptionHandling)
- Features["exception-handling"] = true;
- if (HasBulkMemory)
- Features["bulk-memory"] = true;
- if (HasAtomics)
- Features["atomics"] = true;
- if (HasMutableGlobals)
- Features["mutable-globals"] = true;
- if (HasMultivalue)
- Features["multivalue"] = true;
- if (HasTailCall)
- Features["tail-call"] = true;
- if (HasReferenceTypes)
- Features["reference-types"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
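The WebAssembly change above makes setSIMDLevel handle both enabling and disabling, so turning off simd128 now also clears unimplemented-simd128, and enabling unimplemented-simd128 implies simd128. On the source side this remains observable through the usual macro; a minimal C sketch:

    #ifdef __wasm_simd128__
    /* simd128 is enabled; <wasm_simd128.h> intrinsics may be used here. */
    #endif
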
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 77a2fe9ae117..0068ccb5d71f 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -69,7 +69,8 @@ protected:
MacroBuilder &Builder) const override;
private:
- static void setSIMDLevel(llvm::StringMap<bool> &Features, SIMDEnum Level);
+ static void setSIMDLevel(llvm::StringMap<bool> &Features, SIMDEnum Level,
+ bool Enabled);
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
@@ -77,6 +78,9 @@ private:
const std::vector<std::string> &FeaturesVec) const override;
bool hasFeature(StringRef Feature) const final;
+ void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
+ bool Enabled) const final;
+
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) final;
diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp
index 543f232d2459..694a8095e336 100644
--- a/clang/lib/Basic/Targets/X86.cpp
+++ b/clang/lib/Basic/Targets/X86.cpp
@@ -159,11 +159,7 @@ void X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
}
Features[Name] = Enabled;
-
- SmallVector<StringRef, 8> ImpliedFeatures;
- llvm::X86::getImpliedFeatures(Name, Enabled, ImpliedFeatures);
- for (const auto &F : ImpliedFeatures)
- Features[F] = Enabled;
+ llvm::X86::updateImpliedFeatures(Name, Enabled, Features);
}
/// handleTargetFeatures - Perform initialization based on the user
@@ -280,6 +276,10 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasCLDEMOTE = true;
} else if (Feature == "+rdpid") {
HasRDPID = true;
+ } else if (Feature == "+kl") {
+ HasKL = true;
+ } else if (Feature == "+widekl") {
+ HasWIDEKL = true;
} else if (Feature == "+retpoline-external-thunk") {
HasRetpolineExternalThunk = true;
} else if (Feature == "+sahf") {
@@ -298,16 +298,22 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasINVPCID = true;
} else if (Feature == "+enqcmd") {
HasENQCMD = true;
+ } else if (Feature == "+hreset") {
+ HasHRESET = true;
} else if (Feature == "+amx-bf16") {
HasAMXBF16 = true;
} else if (Feature == "+amx-int8") {
HasAMXINT8 = true;
} else if (Feature == "+amx-tile") {
HasAMXTILE = true;
+ } else if (Feature == "+avxvnni") {
+ HasAVXVNNI = true;
} else if (Feature == "+serialize") {
HasSERIALIZE = true;
} else if (Feature == "+tsxldtrk") {
HasTSXLDTRK = true;
+ } else if (Feature == "+uintr") {
+ HasUINTR = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -463,6 +469,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_IcelakeClient:
case CK_IcelakeServer:
case CK_Tigerlake:
+ case CK_SapphireRapids:
+ case CK_Alderlake:
// FIXME: Historically, we defined this legacy name; it would be nice to
// remove it at some point. We've never exposed fine-grained names for
// recent primary x86 CPUs, and we should keep it that way.
@@ -505,6 +513,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_K8:
case CK_K8SSE3:
case CK_x86_64:
+ case CK_x86_64_v2:
+ case CK_x86_64_v3:
+ case CK_x86_64_v4:
defineCPUMacros(Builder, "k8");
break;
case CK_AMDFAM10:
@@ -534,6 +545,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_ZNVER2:
defineCPUMacros(Builder, "znver2");
break;
+ case CK_ZNVER3:
+ defineCPUMacros(Builder, "znver3");
+ break;
case CK_Geode:
defineCPUMacros(Builder, "geode");
break;
@@ -559,6 +573,11 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasVPCLMULQDQ)
Builder.defineMacro("__VPCLMULQDQ__");
+ // Note: in 32-bit mode, GCC does not define the macro when -mno-sahf is
+ // given. In LLVM, the feature flag only applies to 64-bit mode.
+ if (HasLAHFSAHF || getTriple().getArch() == llvm::Triple::x86)
+ Builder.defineMacro("__LAHF_SAHF__");
+
if (HasLZCNT)
Builder.defineMacro("__LZCNT__");
@@ -681,6 +700,10 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__PREFETCHWT1__");
if (HasCLZERO)
Builder.defineMacro("__CLZERO__");
+ if (HasKL)
+ Builder.defineMacro("__KL__");
+ if (HasWIDEKL)
+ Builder.defineMacro("__WIDEKL__");
if (HasRDPID)
Builder.defineMacro("__RDPID__");
if (HasCLDEMOTE)
@@ -699,16 +722,22 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__INVPCID__");
if (HasENQCMD)
Builder.defineMacro("__ENQCMD__");
+ if (HasHRESET)
+ Builder.defineMacro("__HRESET__");
if (HasAMXTILE)
Builder.defineMacro("__AMXTILE__");
if (HasAMXINT8)
Builder.defineMacro("__AMXINT8__");
if (HasAMXBF16)
Builder.defineMacro("__AMXBF16__");
+ if (HasAVXVNNI)
+ Builder.defineMacro("__AVXVNNI__");
if (HasSERIALIZE)
Builder.defineMacro("__SERIALIZE__");
if (HasTSXLDTRK)
Builder.defineMacro("__TSXLDTRK__");
+ if (HasUINTR)
+ Builder.defineMacro("__UINTR__");
// Each case falls through to the previous one here.
switch (SSELevel) {
@@ -821,6 +850,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512vbmi2", true)
.Case("avx512ifma", true)
.Case("avx512vp2intersect", true)
+ .Case("avxvnni", true)
.Case("bmi", true)
.Case("bmi2", true)
.Case("cldemote", true)
@@ -835,7 +865,10 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("fsgsbase", true)
.Case("fxsr", true)
.Case("gfni", true)
+ .Case("hreset", true)
.Case("invpcid", true)
+ .Case("kl", true)
+ .Case("widekl", true)
.Case("lwp", true)
.Case("lzcnt", true)
.Case("mmx", true)
@@ -869,6 +902,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("sse4a", true)
.Case("tbm", true)
.Case("tsxldtrk", true)
+ .Case("uintr", true)
.Case("vaes", true)
.Case("vpclmulqdq", true)
.Case("wbnoinvd", true)
@@ -889,6 +923,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("amx-bf16", HasAMXBF16)
.Case("amx-int8", HasAMXINT8)
.Case("amx-tile", HasAMXTILE)
+ .Case("avxvnni", HasAVXVNNI)
.Case("avx", SSELevel >= AVX)
.Case("avx2", SSELevel >= AVX2)
.Case("avx512f", SSELevel >= AVX512F)
@@ -921,7 +956,10 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("fsgsbase", HasFSGSBASE)
.Case("fxsr", HasFXSR)
.Case("gfni", HasGFNI)
+ .Case("hreset", HasHRESET)
.Case("invpcid", HasINVPCID)
+ .Case("kl", HasKL)
+ .Case("widekl", HasWIDEKL)
.Case("lwp", HasLWP)
.Case("lzcnt", HasLZCNT)
.Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
@@ -957,6 +995,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse4a", XOPLevel >= SSE4A)
.Case("tbm", HasTBM)
.Case("tsxldtrk", HasTSXLDTRK)
+ .Case("uintr", HasUINTR)
.Case("vaes", HasVAES)
.Case("vpclmulqdq", HasVPCLMULQDQ)
.Case("wbnoinvd", HasWBNOINVD)
@@ -1273,8 +1312,10 @@ Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
case CK_Cooperlake:
case CK_Cannonlake:
case CK_Tigerlake:
+ case CK_SapphireRapids:
case CK_IcelakeClient:
case CK_IcelakeServer:
+ case CK_Alderlake:
case CK_KNL:
case CK_KNM:
// K7
@@ -1295,8 +1336,12 @@ Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
// Zen
case CK_ZNVER1:
case CK_ZNVER2:
+ case CK_ZNVER3:
// Deprecated
case CK_x86_64:
+ case CK_x86_64_v2:
+ case CK_x86_64_v3:
+ case CK_x86_64_v4:
case CK_Yonah:
case CK_Penryn:
case CK_Core2:
@@ -1440,6 +1485,10 @@ void X86TargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
llvm::X86::fillValidCPUArchList(Values, Only64Bit);
}
+void X86TargetInfo::fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const {
+ llvm::X86::fillValidTuneCPUList(Values);
+}
+
ArrayRef<const char *> X86TargetInfo::getGCCRegNames() const {
return llvm::makeArrayRef(GCCRegNames);
}
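The X86.cpp hunks above add Key Locker, HRESET, AVX-VNNI, and UINTR features plus their macros, and introduce validated -mtune handling. A minimal C sketch of guarding on the new macros (the header and intrinsic names in the comments are given as assumptions for illustration):

    #if defined(__KL__) && defined(__WIDEKL__)
    /* Key Locker intrinsics (e.g. from <keylockerintrin.h>) may be used. */
    #endif
    #ifdef __UINTR__
    /* User-interrupt intrinsics such as _senduipi() may be used. */
    #endif
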
diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h
index 72a01d2514c2..91a365c7d405 100644
--- a/clang/lib/Basic/Targets/X86.h
+++ b/clang/lib/Basic/Targets/X86.h
@@ -30,6 +30,8 @@ static const unsigned X86AddrSpaceMap[] = {
0, // opencl_constant
0, // opencl_private
0, // opencl_generic
+ 0, // opencl_global_device
+ 0, // opencl_global_host
0, // cuda_device
0, // cuda_constant
0, // cuda_shared
@@ -125,11 +127,16 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasPTWRITE = false;
bool HasINVPCID = false;
bool HasENQCMD = false;
+ bool HasKL = false; // For key locker
+ bool HasWIDEKL = false; // For wide key locker
+ bool HasHRESET = false;
+ bool HasAVXVNNI = false;
bool HasAMXTILE = false;
bool HasAMXINT8 = false;
bool HasAMXBF16 = false;
bool HasSERIALIZE = false;
bool HasTSXLDTRK = false;
+ bool HasUINTR = false;
protected:
llvm::X86::CPUKind CPU = llvm::X86::CK_None;
@@ -142,6 +149,11 @@ public:
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
AddrSpaceMap = &X86AddrSpaceMap;
HasStrictFP = true;
+
+ bool IsWinCOFF =
+ getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
+ if (IsWinCOFF)
+ MaxVectorAlign = MaxTLSAlign = 8192u * getCharWidth();
}
const char *getLongDoubleMangling() const override {
@@ -289,12 +301,27 @@ public:
return "";
}
+ bool supportsTargetAttributeTune() const override {
+ return true;
+ }
+
bool isValidCPUName(StringRef Name) const override {
bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
return llvm::X86::parseArchX86(Name, Only64Bit) != llvm::X86::CK_None;
}
+ bool isValidTuneCPUName(StringRef Name) const override {
+ if (Name == "generic")
+ return true;
+
+ // Unlike isValidCPUName, allow 32-bit-only CPUs here regardless of 64-bit
+ // mode. NOTE: GCC rejects 32-bit -mtune CPUs in 64-bit mode, but we stay
+ // lenient since clang ignored -mtune for so long.
+ return llvm::X86::parseTuneCPU(Name) != llvm::X86::CK_None;
+ }
+
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+ void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
@@ -332,9 +359,7 @@ public:
bool hasSjLjLowering() const override { return true; }
- void setSupportedOpenCLOpts() override {
- getSupportedOpenCLOpts().supportAll();
- }
+ void setSupportedOpenCLOpts() override { supportAllOpenCLOpts(); }
uint64_t getPointerWidthV(unsigned AddrSpace) const override {
if (AddrSpace == ptr32_sptr || AddrSpace == ptr32_uptr)
@@ -358,7 +383,10 @@ public:
LongDoubleWidth = 96;
LongDoubleAlign = 32;
SuitableAlign = 128;
- resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
+ resetDataLayout(Triple.isOSBinFormatMachO() ?
+ "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
+ "f80:32-n8:16:32-S128" :
+ "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
"f80:32-n8:16:32-S128");
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
diff --git a/clang/lib/Basic/Version.cpp b/clang/lib/Basic/Version.cpp
index 286107cab9d7..af3118b0f6da 100644
--- a/clang/lib/Basic/Version.cpp
+++ b/clang/lib/Basic/Version.cpp
@@ -17,9 +17,7 @@
#include <cstdlib>
#include <cstring>
-#ifdef HAVE_VCS_VERSION_INC
#include "VCSVersion.inc"
-#endif
namespace clang {
diff --git a/clang/lib/Basic/Warnings.cpp b/clang/lib/Basic/Warnings.cpp
index 2c909d9510d4..cc8c138233ca 100644
--- a/clang/lib/Basic/Warnings.cpp
+++ b/clang/lib/Basic/Warnings.cpp
@@ -130,11 +130,14 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
}
// -Werror/-Wno-error is a special case, not controlled by the option
- // table. It also has the "specifier" form of -Werror=foo and -Werror-foo.
+ // table. It also has the "specifier" form of -Werror=foo. GCC supports
+ // the deprecated -Werror-implicit-function-declaration, which is used by
+ // a few projects.
if (Opt.startswith("error")) {
StringRef Specifier;
if (Opt.size() > 5) { // Specifier must be present.
- if ((Opt[5] != '=' && Opt[5] != '-') || Opt.size() == 6) {
+ if (Opt[5] != '=' &&
+ Opt.substr(5) != "-implicit-function-declaration") {
if (Report)
Diags.Report(diag::warn_unknown_warning_specifier)
<< "-Werror" << ("-W" + OrigOpt.str());
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
index bb40dace8a84..56f0dd4322d2 100644
--- a/clang/lib/CodeGen/ABIInfo.h
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -111,7 +111,7 @@ namespace swiftcall {
/// A convenience method to return an indirect ABIArgInfo with an
/// expected alignment equal to the ABI alignment of the given type.
CodeGen::ABIArgInfo
- getNaturalAlignIndirect(QualType Ty, bool ByRef = true,
+ getNaturalAlignIndirect(QualType Ty, bool ByVal = true,
bool Realign = false,
llvm::Type *Padding = nullptr) const;
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index dce0940670a2..52bcd971dc8c 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -65,15 +66,18 @@
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
+#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
+#include "llvm/Transforms/Instrumentation/MemProfiler.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/Transforms/Instrumentation/SanitizerCoverage.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
@@ -255,6 +259,8 @@ static bool asanUseGlobalsGC(const Triple &T, const CodeGenOptions &CGOpts) {
return true;
case Triple::ELF:
return CGOpts.DataSections && !CGOpts.DisableIntegratedAS;
+ case Triple::GOFF:
+ llvm::report_fatal_error("ASan not implemented for GOFF");
case Triple::XCOFF:
llvm::report_fatal_error("ASan not implemented for XCOFF.");
case Triple::Wasm:
@@ -264,6 +270,12 @@ static bool asanUseGlobalsGC(const Triple &T, const CodeGenOptions &CGOpts) {
return false;
}
+static void addMemProfilerPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createMemProfilerFunctionPass());
+ PM.add(createModuleMemProfilerLegacyPassPass());
+}
+
static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
const PassManagerBuilderWrapper &BuilderWrapper =
@@ -349,7 +361,8 @@ static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
- PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles));
+ PM.add(
+ createDataFlowSanitizerLegacyPassPass(LangOpts.SanitizerBlacklistFiles));
}
static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
@@ -360,6 +373,16 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
case CodeGenOptions::Accelerate:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate);
break;
+ case CodeGenOptions::LIBMVEC:
+ switch (TargetTriple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86_64:
+ TLII->addVectorizableFunctionsFromVecLib(
+ TargetLibraryInfoImpl::LIBMVEC_X86);
+ break;
+ }
+ break;
case CodeGenOptions::MASSV:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV);
break;
@@ -425,16 +448,20 @@ static CodeGenFileType getCodeGenFileType(BackendAction Action) {
}
}
-static void initTargetOptions(DiagnosticsEngine &Diags,
+static bool initTargetOptions(DiagnosticsEngine &Diags,
llvm::TargetOptions &Options,
const CodeGenOptions &CodeGenOpts,
const clang::TargetOptions &TargetOpts,
const LangOptions &LangOpts,
const HeaderSearchOptions &HSOpts) {
- Options.ThreadModel =
- llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
- .Case("posix", llvm::ThreadModel::POSIX)
- .Case("single", llvm::ThreadModel::Single);
+ switch (LangOpts.getThreadModel()) {
+ case LangOptions::ThreadModelKind::POSIX:
+ Options.ThreadModel = llvm::ThreadModel::POSIX;
+ break;
+ case LangOptions::ThreadModelKind::Single:
+ Options.ThreadModel = llvm::ThreadModel::Single;
+ break;
+ }
// Set float ABI type.
assert((CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp" ||
@@ -455,6 +482,7 @@ static void initTargetOptions(DiagnosticsEngine &Diags,
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
case LangOptions::FPM_On:
+ case LangOptions::FPM_FastHonorPragmas:
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
case LangOptions::FPM_Fast:
@@ -462,6 +490,8 @@ static void initTargetOptions(DiagnosticsEngine &Diags,
break;
}
+ Options.BinutilsVersion =
+ llvm::TargetMachine::parseBinutilsVersion(CodeGenOpts.BinutilsVersion);
Options.UseInitArray = CodeGenOpts.UseInitArray;
Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
Options.CompressDebugSections = CodeGenOpts.getCompressDebugSections();
@@ -470,13 +500,13 @@ static void initTargetOptions(DiagnosticsEngine &Diags,
// Set EABI version.
Options.EABIVersion = TargetOpts.EABIVersion;
- if (LangOpts.SjLjExceptions)
+ if (LangOpts.hasSjLjExceptions())
Options.ExceptionModel = llvm::ExceptionHandling::SjLj;
- if (LangOpts.SEHExceptions)
+ if (LangOpts.hasSEHExceptions())
Options.ExceptionModel = llvm::ExceptionHandling::WinEH;
- if (LangOpts.DWARFExceptions)
+ if (LangOpts.hasDWARFExceptions())
Options.ExceptionModel = llvm::ExceptionHandling::DwarfCFI;
- if (LangOpts.WasmExceptions)
+ if (LangOpts.hasWasmExceptions())
Options.ExceptionModel = llvm::ExceptionHandling::Wasm;
Options.NoInfsFPMath = LangOpts.NoHonorInfs;
@@ -496,18 +526,29 @@ static void initTargetOptions(DiagnosticsEngine &Diags,
if (Options.BBSections == llvm::BasicBlockSection::List) {
ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
MemoryBuffer::getFile(CodeGenOpts.BBSections.substr(5));
- if (!MBOrErr)
+ if (!MBOrErr) {
Diags.Report(diag::err_fe_unable_to_load_basic_block_sections_file)
<< MBOrErr.getError().message();
- else
- Options.BBSectionsFuncListBuf = std::move(*MBOrErr);
+ return false;
+ }
+ Options.BBSectionsFuncListBuf = std::move(*MBOrErr);
}
+ Options.EnableMachineFunctionSplitter = CodeGenOpts.SplitMachineFunctions;
Options.FunctionSections = CodeGenOpts.FunctionSections;
Options.DataSections = CodeGenOpts.DataSections;
+ Options.IgnoreXCOFFVisibility = CodeGenOpts.IgnoreXCOFFVisibility;
Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
Options.UniqueBasicBlockSectionNames =
CodeGenOpts.UniqueBasicBlockSectionNames;
+ Options.StackProtectorGuard =
+ llvm::StringSwitch<llvm::StackProtectorGuards>(
+ CodeGenOpts.StackProtectorGuard)
+ .Case("tls", llvm::StackProtectorGuards::TLS)
+ .Case("global", llvm::StackProtectorGuards::Global)
+ .Default(llvm::StackProtectorGuards::None);
+ Options.StackProtectorGuardOffset = CodeGenOpts.StackProtectorGuardOffset;
+ Options.StackProtectorGuardReg = CodeGenOpts.StackProtectorGuardReg;
Options.TLSSize = CodeGenOpts.TLSSize;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
Options.ExplicitEmulatedTLS = CodeGenOpts.ExplicitEmulatedTLS;
@@ -516,6 +557,10 @@ static void initTargetOptions(DiagnosticsEngine &Diags,
Options.EmitAddrsig = CodeGenOpts.Addrsig;
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
+ Options.EnableAIXExtendedAltivecABI = CodeGenOpts.EnableAIXExtendedAltivecABI;
+ Options.PseudoProbeForProfiling = CodeGenOpts.PseudoProbeForProfiling;
+ Options.ValueTrackingVariableLocations =
+ CodeGenOpts.ValueTrackingVariableLocations;
Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
@@ -528,6 +573,7 @@ static void initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
Options.MCOptions.MCNoWarn = CodeGenOpts.NoWarn;
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
+ Options.MCOptions.Dwarf64 = CodeGenOpts.Dwarf64;
Options.MCOptions.PreserveAsmComments = CodeGenOpts.PreserveAsmComments;
Options.MCOptions.ABIName = TargetOpts.ABI;
for (const auto &Entry : HSOpts.UserEntries)
@@ -539,10 +585,12 @@ static void initTargetOptions(DiagnosticsEngine &Diags,
Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
+
+ return true;
}
-static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
- if (CodeGenOpts.DisableGCov)
- return None;
+
+static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts) {
if (!CodeGenOpts.EmitGcovArcs && !CodeGenOpts.EmitGcovNotes)
return None;
// Not using 'GCOVOptions::getDefault' allows us to avoid exiting if
@@ -554,6 +602,7 @@ static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.Filter = CodeGenOpts.ProfileFilterFiles;
Options.Exclude = CodeGenOpts.ProfileExcludeFiles;
+ Options.Atomic = CodeGenOpts.AtomicProfileUpdate;
return Options;
}
@@ -565,10 +614,7 @@ getInstrProfOptions(const CodeGenOptions &CodeGenOpts,
InstrProfOptions Options;
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput;
-
- // TODO: Surface the option to emit atomic profile counter increments at
- // the driver level.
- Options.Atomic = LangOpts.Sanitize.has(SanitizerKind::Thread);
+ Options.Atomic = CodeGenOpts.AtomicProfileUpdate;
return Options;
}
@@ -656,6 +702,13 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (LangOpts.Coroutines)
addCoroutinePassesToExtensionPoints(PMBuilder);
+ if (!CodeGenOpts.MemoryProfileOutput.empty()) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addMemProfilerPasses);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addMemProfilerPasses);
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addBoundsCheckingPass);
@@ -743,7 +796,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
MPM.add(createUniqueInternalLinkageNamesPass());
}
- if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts)) {
+ if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts)) {
MPM.add(createGCOVProfilerPass(*Options));
if (CodeGenOpts.getDebugInfo() == codegenoptions::NoDebugInfo)
MPM.add(createStripSymbolsPass(true));
@@ -820,7 +873,9 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
CodeGenOpt::Level OptLevel = getCGOptLevel(CodeGenOpts);
llvm::TargetOptions Options;
- initTargetOptions(Diags, Options, CodeGenOpts, TargetOpts, LangOpts, HSOpts);
+ if (!initTargetOptions(Diags, Options, CodeGenOpts, TargetOpts, LangOpts,
+ HSOpts))
+ return;
TM.reset(TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr,
Options, RM, CM, OptLevel));
}
@@ -856,7 +911,7 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(FrontendTimesIsEnabled ? &CodeGenerationTime : nullptr);
+ TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
setCommandLineOpts(CodeGenOpts);
@@ -977,6 +1032,9 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
default:
llvm_unreachable("Invalid optimization level!");
+ case 0:
+ return PassBuilder::OptimizationLevel::O0;
+
case 1:
return PassBuilder::OptimizationLevel::O1;
@@ -1000,73 +1058,6 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
}
}
-static void addCoroutinePassesAtO0(ModulePassManager &MPM,
- const LangOptions &LangOpts,
- const CodeGenOptions &CodeGenOpts) {
- if (!LangOpts.Coroutines)
- return;
-
- MPM.addPass(createModuleToFunctionPassAdaptor(CoroEarlyPass()));
-
- CGSCCPassManager CGPM(CodeGenOpts.DebugPassManager);
- CGPM.addPass(CoroSplitPass());
- CGPM.addPass(createCGSCCToFunctionPassAdaptor(CoroElidePass()));
- MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
-
- MPM.addPass(createModuleToFunctionPassAdaptor(CoroCleanupPass()));
-}
-
-static void addSanitizersAtO0(ModulePassManager &MPM,
- const Triple &TargetTriple,
- const LangOptions &LangOpts,
- const CodeGenOptions &CodeGenOpts) {
- if (CodeGenOpts.SanitizeCoverageType ||
- CodeGenOpts.SanitizeCoverageIndirectCalls ||
- CodeGenOpts.SanitizeCoverageTraceCmp) {
- auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
- MPM.addPass(ModuleSanitizerCoveragePass(
- SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
- CodeGenOpts.SanitizeCoverageBlocklistFiles));
- }
-
- auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
- MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
- bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
- CompileKernel, Recover, CodeGenOpts.SanitizeAddressUseAfterScope)));
- bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
- MPM.addPass(
- ModuleAddressSanitizerPass(CompileKernel, Recover, ModuleUseAfterScope,
- CodeGenOpts.SanitizeAddressUseOdrIndicator));
- };
-
- if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
- ASanPass(SanitizerKind::Address, /*CompileKernel=*/false);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelAddress)) {
- ASanPass(SanitizerKind::KernelAddress, /*CompileKernel=*/true);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
- int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
- MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
- MPM.addPass(createModuleToFunctionPassAdaptor(
- MemorySanitizerPass({TrackOrigins, Recover, false})));
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelMemory)) {
- MPM.addPass(createModuleToFunctionPassAdaptor(
- MemorySanitizerPass({0, false, /*Kernel=*/true})));
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- MPM.addPass(ThreadSanitizerPass());
- MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
- }
-}
-
/// A clean version of `EmitAssembly` that uses the new pass manager.
///
/// Not all features are currently supported in this system, but where
@@ -1077,7 +1068,7 @@ static void addSanitizersAtO0(ModulePassManager &MPM,
/// `EmitAssembly` at some point in the future when the default switches.
void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(FrontendTimesIsEnabled ? &CodeGenerationTime : nullptr);
+ TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
setCommandLineOpts(CodeGenOpts);
bool RequiresCodeGen = (Action != Backend_EmitNothing &&
@@ -1108,10 +1099,15 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
CSAction, CodeGenOpts.DebugInfoForProfiling);
} else if (!CodeGenOpts.SampleProfileFile.empty())
// -fprofile-sample-use
+ PGOOpt = PGOOptions(
+ CodeGenOpts.SampleProfileFile, "", CodeGenOpts.ProfileRemappingFile,
+ PGOOptions::SampleUse, PGOOptions::NoCSAction,
+ CodeGenOpts.DebugInfoForProfiling, CodeGenOpts.PseudoProbeForProfiling);
+ else if (CodeGenOpts.PseudoProbeForProfiling)
+ // -fpseudo-probe-for-profiling
PGOOpt =
- PGOOptions(CodeGenOpts.SampleProfileFile, "",
- CodeGenOpts.ProfileRemappingFile, PGOOptions::SampleUse,
- PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling);
+ PGOOptions("", "", "", PGOOptions::NoAction, PGOOptions::NoCSAction,
+ CodeGenOpts.DebugInfoForProfiling, true);
else if (CodeGenOpts.DebugInfoForProfiling)
// -fdebug-info-for-profiling
PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
@@ -1147,15 +1143,17 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PTO.LoopInterleaving = CodeGenOpts.UnrollLoops;
PTO.LoopVectorization = CodeGenOpts.VectorizeLoop;
PTO.SLPVectorization = CodeGenOpts.VectorizeSLP;
+ PTO.MergeFunctions = CodeGenOpts.MergeFunctions;
  // Only enable CGProfilePass when using the integrated assembler, since
  // non-integrated assemblers don't recognize the .cgprofile section.
PTO.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
PTO.Coroutines = LangOpts.Coroutines;
+ PTO.UniqueLinkageNames = CodeGenOpts.UniqueInternalLinkageNames;
PassInstrumentationCallbacks PIC;
- StandardInstrumentations SI;
+ StandardInstrumentations SI(CodeGenOpts.DebugPassManager);
SI.registerCallbacks(PIC);
- PassBuilder PB(TM.get(), PTO, PGOOpt, &PIC);
+ PassBuilder PB(CodeGenOpts.DebugPassManager, TM.get(), PTO, PGOOpt, &PIC);
// Attempt to load pass plugins and register their callbacks with PB.
for (auto &PluginFN : CodeGenOpts.PassPlugins) {
@@ -1196,184 +1194,164 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
ModulePassManager MPM(CodeGenOpts.DebugPassManager);
if (!CodeGenOpts.DisableLLVMPasses) {
+ // Map our optimization levels into one of the distinct levels used to
+ // configure the pipeline.
+ PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+
bool IsThinLTO = CodeGenOpts.PrepareForThinLTO;
bool IsLTO = CodeGenOpts.PrepareForLTO;
- if (CodeGenOpts.OptimizationLevel == 0) {
- // If we reached here with a non-empty index file name, then the index
- // file was empty and we are not performing ThinLTO backend compilation
- // (used in testing in a distributed build environment). Drop any the type
- // test assume sequences inserted for whole program vtables so that
- // codegen doesn't complain.
- if (!CodeGenOpts.ThinLTOIndexFile.empty())
- MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
- /*ImportSummary=*/nullptr,
- /*DropTypeTests=*/true));
- if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
- MPM.addPass(GCOVProfilerPass(*Options));
- if (Optional<InstrProfOptions> Options =
- getInstrProfOptions(CodeGenOpts, LangOpts))
- MPM.addPass(InstrProfiling(*Options, false));
-
- // Build a minimal pipeline based on the semantics required by Clang,
- // which is just that always inlining occurs. Further, disable generating
- // lifetime intrinsics to avoid enabling further optimizations during
- // code generation.
- // However, we need to insert lifetime intrinsics to avoid invalid access
- // caused by multithreaded coroutines.
- MPM.addPass(
- AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/LangOpts.Coroutines));
-
- // At -O0, we can still do PGO. Add all the requested passes for
- // instrumentation PGO, if requested.
- if (PGOOpt && (PGOOpt->Action == PGOOptions::IRInstr ||
- PGOOpt->Action == PGOOptions::IRUse))
- PB.addPGOInstrPassesForO0(
- MPM, CodeGenOpts.DebugPassManager,
- /* RunProfileGen */ (PGOOpt->Action == PGOOptions::IRInstr),
- /* IsCS */ false, PGOOpt->ProfileFile,
- PGOOpt->ProfileRemappingFile);
-
- // At -O0 we directly run necessary sanitizer passes.
- if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
- MPM.addPass(createModuleToFunctionPassAdaptor(BoundsCheckingPass()));
-
- // Add UniqueInternalLinkageNames Pass which renames internal linkage
- // symbols with unique names.
- if (CodeGenOpts.UniqueInternalLinkageNames) {
- MPM.addPass(UniqueInternalLinkageNamesPass());
- }
+ if (LangOpts.ObjCAutoRefCount) {
+ PB.registerPipelineStartEPCallback(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ if (Level != PassBuilder::OptimizationLevel::O0)
+ MPM.addPass(
+ createModuleToFunctionPassAdaptor(ObjCARCExpandPass()));
+ });
+ PB.registerPipelineEarlySimplificationEPCallback(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ if (Level != PassBuilder::OptimizationLevel::O0)
+ MPM.addPass(ObjCARCAPElimPass());
+ });
+ PB.registerScalarOptimizerLateEPCallback(
+ [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ if (Level != PassBuilder::OptimizationLevel::O0)
+ FPM.addPass(ObjCARCOptPass());
+ });
+ }
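The ObjC ARC branch above illustrates the new extension-point style: every callback now takes the optimization level, so one registration site serves both the -O0 and the optimized pipelines. A minimal standalone sketch of the idiom, assuming only the public PassBuilder API from this snapshot (VerifierPass is just a stand-in for any module pass):

#include "llvm/IR/Verifier.h"
#include "llvm/Passes/PassBuilder.h"
using namespace llvm;

void runSketch(Module &M) {
  PassBuilder PB;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  // The level parameter lets a callback opt out at -O0 instead of forcing
  // the caller to maintain a separate -O0-only registration path.
  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
        if (Level != PassBuilder::OptimizationLevel::O0)
          MPM.addPass(VerifierPass());
      });

  ModulePassManager MPM =
      PB.buildPerModuleDefaultPipeline(PassBuilder::OptimizationLevel::O2);
  MPM.run(M, MAM);
}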
- // Lastly, add semantically necessary passes for LTO.
- if (IsLTO || IsThinLTO) {
- MPM.addPass(CanonicalizeAliasesPass());
- MPM.addPass(NameAnonGlobalPass());
- }
- } else {
- // Map our optimization levels into one of the distinct levels used to
- // configure the pipeline.
- PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
-
- // If we reached here with a non-empty index file name, then the index
- // file was empty and we are not performing ThinLTO backend compilation
- // (used in testing in a distributed build environment). Drop any the type
- // test assume sequences inserted for whole program vtables so that
- // codegen doesn't complain.
- if (!CodeGenOpts.ThinLTOIndexFile.empty())
- PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
- MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
- /*ImportSummary=*/nullptr,
- /*DropTypeTests=*/true));
- });
-
- PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
- MPM.addPass(createModuleToFunctionPassAdaptor(
- EntryExitInstrumenterPass(/*PostInlining=*/false)));
- });
+ // If we reached here with a non-empty index file name, then the index
+ // file was empty and we are not performing ThinLTO backend compilation
+  // (used in testing in a distributed build environment). Drop any type
+ // test assume sequences inserted for whole program vtables so that
+ // codegen doesn't complain.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ PB.registerPipelineStartEPCallback(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/true));
+ });
+
+ if (Level != PassBuilder::OptimizationLevel::O0) {
+ PB.registerPipelineStartEPCallback(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ EntryExitInstrumenterPass(/*PostInlining=*/false)));
+ });
+ }
- // Register callbacks to schedule sanitizer passes at the appropriate part of
- // the pipeline.
- if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
- PB.registerScalarOptimizerLateEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
- FPM.addPass(BoundsCheckingPass());
- });
+ // Register callbacks to schedule sanitizer passes at the appropriate part
+ // of the pipeline.
+ if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
+ PB.registerScalarOptimizerLateEPCallback(
+ [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ FPM.addPass(BoundsCheckingPass());
+ });
+
+ if (CodeGenOpts.SanitizeCoverageType ||
+ CodeGenOpts.SanitizeCoverageIndirectCalls ||
+ CodeGenOpts.SanitizeCoverageTraceCmp) {
+ PB.registerOptimizerLastEPCallback(
+ [this](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
+ MPM.addPass(ModuleSanitizerCoveragePass(
+ SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
+ CodeGenOpts.SanitizeCoverageBlocklistFiles));
+ });
+ }
- if (CodeGenOpts.SanitizeCoverageType ||
- CodeGenOpts.SanitizeCoverageIndirectCalls ||
- CodeGenOpts.SanitizeCoverageTraceCmp) {
- PB.registerOptimizerLastEPCallback(
- [this](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
- auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
- MPM.addPass(ModuleSanitizerCoveragePass(
- SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
- CodeGenOpts.SanitizeCoverageBlocklistFiles));
- });
- }
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
+ int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
+ bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
+ PB.registerOptimizerLastEPCallback(
+ [TrackOrigins, Recover](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ MemorySanitizerPass({TrackOrigins, Recover, false})));
+ });
+ }
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
+ PB.registerOptimizerLastEPCallback(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(ThreadSanitizerPass());
+ MPM.addPass(
+ createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
+ });
+ }
- if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
- bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
- PB.registerOptimizerLastEPCallback(
- [TrackOrigins, Recover](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
- MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
- MPM.addPass(createModuleToFunctionPassAdaptor(
- MemorySanitizerPass({TrackOrigins, Recover, false})));
- });
- }
- if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- PB.registerOptimizerLastEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- MPM.addPass(ThreadSanitizerPass());
- MPM.addPass(
- createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
- });
- }
- if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
- bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Address);
+ auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
+ if (LangOpts.Sanitize.has(Mask)) {
+ bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
PB.registerOptimizerLastEPCallback(
- [Recover, UseAfterScope, ModuleUseAfterScope, UseOdrIndicator](
- ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [CompileKernel, Recover, UseAfterScope, ModuleUseAfterScope,
+ UseOdrIndicator](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
MPM.addPass(
RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
- MPM.addPass(ModuleAddressSanitizerPass(
- /*CompileKernel=*/false, Recover, ModuleUseAfterScope,
- UseOdrIndicator));
- MPM.addPass(
- createModuleToFunctionPassAdaptor(AddressSanitizerPass(
- /*CompileKernel=*/false, Recover, UseAfterScope)));
+ MPM.addPass(ModuleAddressSanitizerPass(CompileKernel, Recover,
+ ModuleUseAfterScope,
+ UseOdrIndicator));
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ AddressSanitizerPass(CompileKernel, Recover, UseAfterScope)));
});
}
- if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
- PB.registerPipelineStartEPCallback([Options](ModulePassManager &MPM) {
- MPM.addPass(GCOVProfilerPass(*Options));
- });
- if (Optional<InstrProfOptions> Options =
- getInstrProfOptions(CodeGenOpts, LangOpts))
- PB.registerPipelineStartEPCallback([Options](ModulePassManager &MPM) {
- MPM.addPass(InstrProfiling(*Options, false));
- });
-
- // Add UniqueInternalLinkageNames Pass which renames internal linkage
- // symbols with unique names.
- if (CodeGenOpts.UniqueInternalLinkageNames) {
- MPM.addPass(UniqueInternalLinkageNamesPass());
- }
+ };
+ ASanPass(SanitizerKind::Address, false);
+ ASanPass(SanitizerKind::KernelAddress, true);
- if (IsThinLTO) {
- MPM = PB.buildThinLTOPreLinkDefaultPipeline(
- Level, CodeGenOpts.DebugPassManager);
- MPM.addPass(CanonicalizeAliasesPass());
- MPM.addPass(NameAnonGlobalPass());
- } else if (IsLTO) {
- MPM = PB.buildLTOPreLinkDefaultPipeline(Level,
- CodeGenOpts.DebugPassManager);
- MPM.addPass(CanonicalizeAliasesPass());
- MPM.addPass(NameAnonGlobalPass());
- } else {
- MPM = PB.buildPerModuleDefaultPipeline(Level,
- CodeGenOpts.DebugPassManager);
+ auto HWASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
+ if (LangOpts.Sanitize.has(Mask)) {
+ bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
+ PB.registerOptimizerLastEPCallback(
+ [CompileKernel, Recover](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(HWAddressSanitizerPass(CompileKernel, Recover));
+ });
}
+ };
+ HWASanPass(SanitizerKind::HWAddress, false);
+ HWASanPass(SanitizerKind::KernelHWAddress, true);
+
+ if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
+ PB.registerOptimizerLastEPCallback(
+ [this](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(
+ DataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles));
+ });
}
- if (LangOpts.Sanitize.has(SanitizerKind::HWAddress)) {
- bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
- MPM.addPass(HWAddressSanitizerPass(
- /*CompileKernel=*/false, Recover));
- }
- if (LangOpts.Sanitize.has(SanitizerKind::KernelHWAddress)) {
- MPM.addPass(HWAddressSanitizerPass(
- /*CompileKernel=*/true, /*Recover=*/true));
- }
+ if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts))
+ PB.registerPipelineStartEPCallback(
+ [Options](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(GCOVProfilerPass(*Options));
+ });
+ if (Optional<InstrProfOptions> Options =
+ getInstrProfOptions(CodeGenOpts, LangOpts))
+ PB.registerPipelineStartEPCallback(
+ [Options](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(InstrProfiling(*Options, false));
+ });
if (CodeGenOpts.OptimizationLevel == 0) {
- addCoroutinePassesAtO0(MPM, LangOpts, CodeGenOpts);
- addSanitizersAtO0(MPM, TargetTriple, LangOpts, CodeGenOpts);
+ MPM = PB.buildO0DefaultPipeline(Level, IsLTO || IsThinLTO);
+ } else if (IsThinLTO) {
+ MPM = PB.buildThinLTOPreLinkDefaultPipeline(Level);
+ } else if (IsLTO) {
+ MPM = PB.buildLTOPreLinkDefaultPipeline(Level);
+ } else {
+ MPM = PB.buildPerModuleDefaultPipeline(Level);
+ }
+
+ if (!CodeGenOpts.MemoryProfileOutput.empty()) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass()));
+ MPM.addPass(ModuleMemProfilerPass());
}
}
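The tail of this hunk is now the entire pipeline dispatch: -O0 builds buildO0DefaultPipeline (with an LTO pre-link flag), ThinLTO and full LTO build their pre-link pipelines, and everything else gets the per-module default. Condensed into one sketch, using the same PassBuilder entry points shown above:

ModulePassManager selectPipeline(PassBuilder &PB,
                                 PassBuilder::OptimizationLevel Level,
                                 bool IsThinLTO, bool IsLTO) {
  if (Level == PassBuilder::OptimizationLevel::O0)
    return PB.buildO0DefaultPipeline(Level, /*LTOPreLink=*/IsLTO || IsThinLTO);
  if (IsThinLTO)
    return PB.buildThinLTOPreLinkDefaultPipeline(Level);
  if (IsLTO)
    return PB.buildLTOPreLinkDefaultPipeline(Level);
  return PB.buildPerModuleDefaultPipeline(Level);
}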
@@ -1461,29 +1439,6 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
DwoOS->keep();
}
-Expected<BitcodeModule> clang::FindThinLTOModule(MemoryBufferRef MBRef) {
- Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
- if (!BMsOrErr)
- return BMsOrErr.takeError();
-
- // The bitcode file may contain multiple modules, we want the one that is
- // marked as being the ThinLTO module.
- if (const BitcodeModule *Bm = FindThinLTOModule(*BMsOrErr))
- return *Bm;
-
- return make_error<StringError>("Could not find module summary",
- inconvertibleErrorCode());
-}
-
-BitcodeModule *clang::FindThinLTOModule(MutableArrayRef<BitcodeModule> BMs) {
- for (BitcodeModule &BM : BMs) {
- Expected<BitcodeLTOInfo> LTOInfo = BM.getLTOInfo();
- if (LTOInfo && LTOInfo->IsThinLTO)
- return &BM;
- }
- return nullptr;
-}
-
static void runThinLTOBackend(
DiagnosticsEngine &Diags, ModuleSummaryIndex *CombinedIndex, Module *M,
const HeaderSearchOptions &HeaderOpts, const CodeGenOptions &CGOpts,
@@ -1500,46 +1455,12 @@ static void runThinLTOBackend(
// we should only invoke this using the individual indexes written out
// via a WriteIndexesThinBackend.
FunctionImporter::ImportMapTy ImportList;
- for (auto &GlobalList : *CombinedIndex) {
- // Ignore entries for undefined references.
- if (GlobalList.second.SummaryList.empty())
- continue;
-
- auto GUID = GlobalList.first;
- for (auto &Summary : GlobalList.second.SummaryList) {
- // Skip the summaries for the importing module. These are included to
- // e.g. record required linkage changes.
- if (Summary->modulePath() == M->getModuleIdentifier())
- continue;
- // Add an entry to provoke importing by thinBackend.
- ImportList[Summary->modulePath()].insert(GUID);
- }
- }
-
std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;
+ if (!lto::loadReferencedModules(*M, *CombinedIndex, ImportList, ModuleMap,
+ OwnedImports))
+ return;
- for (auto &I : ImportList) {
- ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
- llvm::MemoryBuffer::getFile(I.first());
- if (!MBOrErr) {
- errs() << "Error loading imported file '" << I.first()
- << "': " << MBOrErr.getError().message() << "\n";
- return;
- }
-
- Expected<BitcodeModule> BMOrErr = FindThinLTOModule(**MBOrErr);
- if (!BMOrErr) {
- handleAllErrors(BMOrErr.takeError(), [&](ErrorInfoBase &EIB) {
- errs() << "Error loading imported file '" << I.first()
- << "': " << EIB.message() << '\n';
- });
- return;
- }
- ModuleMap.insert({I.first(), *BMOrErr});
-
- OwnedImports.push_back(std::move(*MBOrErr));
- }
auto AddStream = [&](size_t Task) {
return std::make_unique<lto::NativeObjectStream>(std::move(OS));
};
@@ -1581,7 +1502,7 @@ static void runThinLTOBackend(
}
Conf.ProfileRemapping = std::move(ProfileRemapping);
- Conf.UseNewPM = CGOpts.ExperimentalNewPassManager;
+ Conf.UseNewPM = !CGOpts.LegacyPassManager;
Conf.DebugPassManager = CGOpts.DebugPassManager;
Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
Conf.RemarksFilename = CGOpts.OptRecordFile;
@@ -1611,9 +1532,10 @@ static void runThinLTOBackend(
Conf.CGFileType = getCodeGenFileType(Action);
break;
}
- if (Error E = thinBackend(
- Conf, -1, AddStream, *M, *CombinedIndex, ImportList,
- ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
+ if (Error E =
+ thinBackend(Conf, -1, AddStream, *M, *CombinedIndex, ImportList,
+ ModuleToDefinedGVSummaries[M->getModuleIdentifier()],
+ ModuleMap, CGOpts.CmdArgs)) {
handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
});
@@ -1670,7 +1592,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
EmitAssemblyHelper AsmHelper(Diags, HeaderOpts, CGOpts, TOpts, LOpts, M);
- if (CGOpts.ExperimentalNewPassManager)
+ if (!CGOpts.LegacyPassManager)
AsmHelper.EmitAssemblyWithNewPassManager(Action, std::move(OS));
else
AsmHelper.EmitAssembly(Action, std::move(OS));
@@ -1697,5 +1619,5 @@ void clang::EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
llvm::EmbedBitcodeInModule(
*M, Buf, CGOpts.getEmbedBitcode() != CodeGenOptions::Embed_Marker,
CGOpts.getEmbedBitcode() != CodeGenOptions::Embed_Bitcode,
- &CGOpts.CmdArgs);
+ CGOpts.CmdArgs);
}
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index a58450ddd4c5..c7256e240a31 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -119,7 +119,7 @@ namespace {
ValueTy = lvalue.getType();
ValueSizeInBits = C.getTypeSize(ValueTy);
AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
- lvalue.getType(), cast<llvm::VectorType>(
+ lvalue.getType(), cast<llvm::FixedVectorType>(
lvalue.getExtVectorAddress().getElementType())
->getNumElements());
AtomicSizeInBits = C.getTypeSize(AtomicTy);
@@ -307,7 +307,14 @@ static RValue emitAtomicLibcall(CodeGenFunction &CGF,
const CGFunctionInfo &fnInfo =
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
- llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
+ llvm::AttrBuilder fnAttrB;
+ fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
+ fnAttrB.addAttribute(llvm::Attribute::WillReturn);
+ llvm::AttributeList fnAttrs = llvm::AttributeList::get(
+ CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
+
+ llvm::FunctionCallee fn =
+ CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
auto callee = CGCallee::forDirect(fn);
return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}
@@ -799,18 +806,27 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
return RValue::get(nullptr);
}
- CharUnits sizeChars, alignChars;
- std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
- uint64_t Size = sizeChars.getQuantity();
+ auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
+ uint64_t Size = TInfo.Width.getQuantity();
unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
- bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
- bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
+ bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
+ bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
bool UseLibcall = Misaligned | Oversized;
+ CharUnits MaxInlineWidth =
+ getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
- if (UseLibcall) {
- CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
- << !Oversized;
+ DiagnosticsEngine &Diags = CGM.getDiags();
+
+ if (Misaligned) {
+ Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
+ << (int)TInfo.Width.getQuantity()
+ << (int)Ptr.getAlignment().getQuantity();
+ }
+
+ if (Oversized) {
+ Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
+ << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
}
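The single misalignment warning is thus split into separate misaligned and oversized diagnostics, each carrying the access size and the offending alignment or maximum inline width. Hypothetical sources that would take each path (type names are illustrative):

struct __attribute__((packed)) Packed { char c; int v; };
struct Big { long a[8]; };

void touch(Packed *p, Big *src, Big *dst) {
  // Under-aligned access: lowered to a libcall, now diagnosed with the
  // access size and the actual alignment.
  __atomic_fetch_add(&p->v, 1, __ATOMIC_SEQ_CST);
  // Wider than the target's maximum inline atomic width: now reported as
  // oversized rather than misaligned.
  __atomic_load(src, dst, __ATOMIC_SEQ_CST);
}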
llvm::Value *Order = EmitScalarExpr(E->getOrder());
@@ -1063,7 +1079,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
@@ -1076,7 +1092,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
@@ -1088,7 +1104,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
RetTy = getContext().VoidTy;
HaveRetTy = true;
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
@@ -1108,7 +1124,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_and_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_and_N(T *mem, T val, int order)
@@ -1120,7 +1136,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_or_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_or_N(T *mem, T val, int order)
@@ -1132,7 +1148,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_sub_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_sub_N(T *mem, T val, int order)
@@ -1144,7 +1160,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_xor_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_xor_N(T *mem, T val, int order)
@@ -1156,7 +1172,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_min_fetch:
PostOpMinMax = true;
@@ -1168,7 +1184,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
? "__atomic_fetch_min"
: "__atomic_fetch_umin";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_max_fetch:
PostOpMinMax = true;
@@ -1180,7 +1196,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
? "__atomic_fetch_max"
: "__atomic_fetch_umax";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_nand_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_nand_N(T *mem, T val, int order)
@@ -1190,7 +1206,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
}
@@ -1208,7 +1224,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// Value is returned directly.
// The function returns an appropriately sized integer type.
RetTy = getContext().getIntTypeForBitwidth(
- getContext().toBits(sizeChars), /*Signed=*/false);
+ getContext().toBits(TInfo.Width), /*Signed=*/false);
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 615b78235041..91c726f4cf64 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -557,11 +557,10 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// Theoretically, this could be in a different address space, so
// don't assume standard pointer size/align.
llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
- std::pair<CharUnits,CharUnits> tinfo
- = CGM.getContext().getTypeInfoInChars(thisType);
- maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+ auto TInfo = CGM.getContext().getTypeInfoInChars(thisType);
+ maxFieldAlign = std::max(maxFieldAlign, TInfo.Align);
- layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
+ layout.push_back(BlockLayoutChunk(TInfo.Align, TInfo.Width,
Qualifiers::OCL_None,
nullptr, llvmType, thisType));
}
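The mechanical change in this file is that getTypeInfoInChars returns a named TypeInfoChars struct rather than a std::pair. A before/after sketch (the two spellings are alternatives, not meant to coexist):

// Before: an unlabeled pair, first == width, second == alignment.
std::pair<CharUnits, CharUnits> WA = Ctx.getTypeInfoInChars(T);
// After: self-describing fields.
TypeInfoChars TI = Ctx.getTypeInfoInChars(T);
CharUnits Width = TI.Width;
CharUnits Align = TI.Align;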
@@ -580,7 +579,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// Since a __block variable cannot be captured by lambdas, its type and
// the capture field type should always match.
- assert(getCaptureFieldType(*CGF, CI) == variable->getType() &&
+ assert(CGF && getCaptureFieldType(*CGF, CI) == variable->getType() &&
"capture type differs from the variable type");
layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
Qualifiers::OCL_None, &CI,
@@ -1024,7 +1023,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
type, VK_LValue, SourceLocation());
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
- &declRef, VK_RValue);
+ &declRef, VK_RValue, FPOptionsOverride());
// FIXME: Pass a specific location for the expr init so that the store is
// attributed to a reasonable location - otherwise it may be attributed to
// locations of subexpressions in the initialization.
@@ -2698,7 +2697,7 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
}
bool HasByrefExtendedLayout = false;
- Qualifiers::ObjCLifetime Lifetime;
+ Qualifiers::ObjCLifetime Lifetime = Qualifiers::OCL_None;
if (getContext().getByrefLifetime(Ty, Lifetime, HasByrefExtendedLayout) &&
HasByrefExtendedLayout) {
/// void *__byref_variable_layout;
@@ -2768,8 +2767,8 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
const VarDecl &D = *emission.Variable;
QualType type = D.getType();
- bool HasByrefExtendedLayout;
- Qualifiers::ObjCLifetime ByrefLifetime;
+ bool HasByrefExtendedLayout = false;
+ Qualifiers::ObjCLifetime ByrefLifetime = Qualifiers::OCL_None;
bool ByRefHasLifetime =
getContext().getByrefLifetime(type, ByrefLifetime, HasByrefExtendedLayout);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 8994b939093e..113541bd5024 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -78,7 +78,8 @@ static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
}
if (CGF.CGM.stopAutoInit())
return;
- CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
+ auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
+ I->addAnnotationMetadata("auto-init");
}
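The annotation gives remark and analysis passes a way to tell compiler-inserted initialization apart from user stores. A hypothetical translation unit whose alloca receives the tagged memset (the flag is real; names are illustrative):

// clang++ -ftrivial-auto-var-init=pattern -c example.cpp
int consume(const char *);
int f() {
  char buf[32]; // the pattern-initializing memset of buf carries "auto-init"
  return consume(buf);
}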
/// getBuiltinLibFunction - Given a builtin id for a function like
@@ -303,6 +304,10 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering::Monotonic :
SuccessOrdering;
+ // The atomic instruction is marked volatile for consistency with MSVC. This
+ // blocks the few atomics optimizations that LLVM has. If we want to optimize
+ // _Interlocked* operations in the future, we will have to remove the volatile
+ // marker.
auto *Result = CGF.Builder.CreateAtomicCmpXchg(
Destination, Comparand, Exchange,
SuccessOrdering, FailureOrdering);
@@ -310,6 +315,68 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
return CGF.Builder.CreateExtractValue(Result, 0);
}
+// 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
+// prototyped like this:
+//
+// unsigned char _InterlockedCompareExchange128...(
+// __int64 volatile * _Destination,
+// __int64 _ExchangeHigh,
+// __int64 _ExchangeLow,
+// __int64 * _ComparandResult);
+static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ AtomicOrdering SuccessOrdering) {
+ assert(E->getNumArgs() == 4);
+ llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
+ llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));
+
+ assert(Destination->getType()->isPointerTy());
+ assert(!ExchangeHigh->getType()->isPointerTy());
+ assert(!ExchangeLow->getType()->isPointerTy());
+ assert(ComparandPtr->getType()->isPointerTy());
+
+ // For Release ordering, the failure ordering should be Monotonic.
+ auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
+ ? AtomicOrdering::Monotonic
+ : SuccessOrdering;
+
+ // Convert to i128 pointers and values.
+ llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
+ llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
+ Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
+ Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
+ CGF.getContext().toCharUnitsFromBits(128));
+
+ // (((i128)hi) << 64) | ((i128)lo)
+ ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
+ ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
+ ExchangeHigh =
+ CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
+ llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
+
+ // Load the comparand for the instruction.
+ llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);
+
+ auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
+ SuccessOrdering, FailureOrdering);
+
+ // The atomic instruction is marked volatile for consistency with MSVC. This
+ // blocks the few atomics optimizations that LLVM has. If we want to optimize
+ // _Interlocked* operations in the future, we will have to remove the volatile
+ // marker.
+ CXI->setVolatile(true);
+
+ // Store the result as an outparameter.
+ CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
+ ComparandResult);
+
+ // Get the success boolean and zero extend it to i8.
+ Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
+ return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
+}
+
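For reference, a hypothetical caller of the intrinsic being lowered here (MSVC-style C++ on x64 or ARM64; the destination is assumed 16-byte aligned, as the instruction requires):

#include <intrin.h>

// Returns true if *dest matched {low = 0, high = 0} and was replaced by
// {low = 2, high = 1}; on failure, expected[] is updated to the current value.
bool tryPublish(__int64 volatile *dest) {
  __int64 expected[2] = {0, 0}; // [0] = low half, [1] = high half
  return _InterlockedCompareExchange128(dest, /*ExchangeHigh=*/1,
                                        /*ExchangeLow=*/2, expected) != 0;
}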
static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
assert(E->getArg(0)->getType()->isPointerType());
@@ -373,6 +440,7 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
} else {
@@ -390,6 +458,7 @@ static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
} else {
@@ -408,6 +477,7 @@ static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
} else {
@@ -489,6 +559,7 @@ emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
{ResultType, Src0->getType()});
return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
@@ -805,10 +876,15 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
AsmOS << "bt";
if (Action)
AsmOS << Action;
- AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";
+ AsmOS << SizeSuffix << " $2, ($1)";
// Build the constraints. FIXME: We should support immediates when possible.
- std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
+ std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
+ std::string MachineClobbers = CGF.getTarget().getClobbers();
+ if (!MachineClobbers.empty()) {
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
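The rewritten constraint string relies on the x86 flag-output operand ('={@ccc}' yields the carry flag), which removes the hand-written setc as well as the bogus ~{flags} and ~{fpsr} clobbers. A source-level analogue of the same idiom, as a sketch (clang/GCC asm flag outputs; names are illustrative):

unsigned char testBit(long *base, long idx) {
  unsigned char carry;
  __asm__("btq %2, %1"
          : "=@ccc"(carry)        // CF after bt, materialized however is cheapest
          : "m"(*base), "r"(idx)
          : "cc", "memory");
  return carry;
}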
@@ -987,6 +1063,10 @@ enum class CodeGenFunction::MSVCIntrin {
_InterlockedCompareExchange_acq,
_InterlockedCompareExchange_rel,
_InterlockedCompareExchange_nf,
+ _InterlockedCompareExchange128,
+ _InterlockedCompareExchange128_acq,
+ _InterlockedCompareExchange128_rel,
+ _InterlockedCompareExchange128_nf,
_InterlockedOr_acq,
_InterlockedOr_rel,
_InterlockedOr_nf,
@@ -1005,16 +1085,352 @@ enum class CodeGenFunction::MSVCIntrin {
__fastfail,
};
+static Optional<CodeGenFunction::MSVCIntrin>
+translateArmToMsvcIntrin(unsigned BuiltinID) {
+ using MSVCIntrin = CodeGenFunction::MSVCIntrin;
+ switch (BuiltinID) {
+ default:
+ return None;
+ case ARM::BI_BitScanForward:
+ case ARM::BI_BitScanForward64:
+ return MSVCIntrin::_BitScanForward;
+ case ARM::BI_BitScanReverse:
+ case ARM::BI_BitScanReverse64:
+ return MSVCIntrin::_BitScanReverse;
+ case ARM::BI_InterlockedAnd64:
+ return MSVCIntrin::_InterlockedAnd;
+ case ARM::BI_InterlockedExchange64:
+ return MSVCIntrin::_InterlockedExchange;
+ case ARM::BI_InterlockedExchangeAdd64:
+ return MSVCIntrin::_InterlockedExchangeAdd;
+ case ARM::BI_InterlockedExchangeSub64:
+ return MSVCIntrin::_InterlockedExchangeSub;
+ case ARM::BI_InterlockedOr64:
+ return MSVCIntrin::_InterlockedOr;
+ case ARM::BI_InterlockedXor64:
+ return MSVCIntrin::_InterlockedXor;
+ case ARM::BI_InterlockedDecrement64:
+ return MSVCIntrin::_InterlockedDecrement;
+ case ARM::BI_InterlockedIncrement64:
+ return MSVCIntrin::_InterlockedIncrement;
+ case ARM::BI_InterlockedExchangeAdd8_acq:
+ case ARM::BI_InterlockedExchangeAdd16_acq:
+ case ARM::BI_InterlockedExchangeAdd_acq:
+ case ARM::BI_InterlockedExchangeAdd64_acq:
+ return MSVCIntrin::_InterlockedExchangeAdd_acq;
+ case ARM::BI_InterlockedExchangeAdd8_rel:
+ case ARM::BI_InterlockedExchangeAdd16_rel:
+ case ARM::BI_InterlockedExchangeAdd_rel:
+ case ARM::BI_InterlockedExchangeAdd64_rel:
+ return MSVCIntrin::_InterlockedExchangeAdd_rel;
+ case ARM::BI_InterlockedExchangeAdd8_nf:
+ case ARM::BI_InterlockedExchangeAdd16_nf:
+ case ARM::BI_InterlockedExchangeAdd_nf:
+ case ARM::BI_InterlockedExchangeAdd64_nf:
+ return MSVCIntrin::_InterlockedExchangeAdd_nf;
+ case ARM::BI_InterlockedExchange8_acq:
+ case ARM::BI_InterlockedExchange16_acq:
+ case ARM::BI_InterlockedExchange_acq:
+ case ARM::BI_InterlockedExchange64_acq:
+ return MSVCIntrin::_InterlockedExchange_acq;
+ case ARM::BI_InterlockedExchange8_rel:
+ case ARM::BI_InterlockedExchange16_rel:
+ case ARM::BI_InterlockedExchange_rel:
+ case ARM::BI_InterlockedExchange64_rel:
+ return MSVCIntrin::_InterlockedExchange_rel;
+ case ARM::BI_InterlockedExchange8_nf:
+ case ARM::BI_InterlockedExchange16_nf:
+ case ARM::BI_InterlockedExchange_nf:
+ case ARM::BI_InterlockedExchange64_nf:
+ return MSVCIntrin::_InterlockedExchange_nf;
+ case ARM::BI_InterlockedCompareExchange8_acq:
+ case ARM::BI_InterlockedCompareExchange16_acq:
+ case ARM::BI_InterlockedCompareExchange_acq:
+ case ARM::BI_InterlockedCompareExchange64_acq:
+ return MSVCIntrin::_InterlockedCompareExchange_acq;
+ case ARM::BI_InterlockedCompareExchange8_rel:
+ case ARM::BI_InterlockedCompareExchange16_rel:
+ case ARM::BI_InterlockedCompareExchange_rel:
+ case ARM::BI_InterlockedCompareExchange64_rel:
+ return MSVCIntrin::_InterlockedCompareExchange_rel;
+ case ARM::BI_InterlockedCompareExchange8_nf:
+ case ARM::BI_InterlockedCompareExchange16_nf:
+ case ARM::BI_InterlockedCompareExchange_nf:
+ case ARM::BI_InterlockedCompareExchange64_nf:
+ return MSVCIntrin::_InterlockedCompareExchange_nf;
+ case ARM::BI_InterlockedOr8_acq:
+ case ARM::BI_InterlockedOr16_acq:
+ case ARM::BI_InterlockedOr_acq:
+ case ARM::BI_InterlockedOr64_acq:
+ return MSVCIntrin::_InterlockedOr_acq;
+ case ARM::BI_InterlockedOr8_rel:
+ case ARM::BI_InterlockedOr16_rel:
+ case ARM::BI_InterlockedOr_rel:
+ case ARM::BI_InterlockedOr64_rel:
+ return MSVCIntrin::_InterlockedOr_rel;
+ case ARM::BI_InterlockedOr8_nf:
+ case ARM::BI_InterlockedOr16_nf:
+ case ARM::BI_InterlockedOr_nf:
+ case ARM::BI_InterlockedOr64_nf:
+ return MSVCIntrin::_InterlockedOr_nf;
+ case ARM::BI_InterlockedXor8_acq:
+ case ARM::BI_InterlockedXor16_acq:
+ case ARM::BI_InterlockedXor_acq:
+ case ARM::BI_InterlockedXor64_acq:
+ return MSVCIntrin::_InterlockedXor_acq;
+ case ARM::BI_InterlockedXor8_rel:
+ case ARM::BI_InterlockedXor16_rel:
+ case ARM::BI_InterlockedXor_rel:
+ case ARM::BI_InterlockedXor64_rel:
+ return MSVCIntrin::_InterlockedXor_rel;
+ case ARM::BI_InterlockedXor8_nf:
+ case ARM::BI_InterlockedXor16_nf:
+ case ARM::BI_InterlockedXor_nf:
+ case ARM::BI_InterlockedXor64_nf:
+ return MSVCIntrin::_InterlockedXor_nf;
+ case ARM::BI_InterlockedAnd8_acq:
+ case ARM::BI_InterlockedAnd16_acq:
+ case ARM::BI_InterlockedAnd_acq:
+ case ARM::BI_InterlockedAnd64_acq:
+ return MSVCIntrin::_InterlockedAnd_acq;
+ case ARM::BI_InterlockedAnd8_rel:
+ case ARM::BI_InterlockedAnd16_rel:
+ case ARM::BI_InterlockedAnd_rel:
+ case ARM::BI_InterlockedAnd64_rel:
+ return MSVCIntrin::_InterlockedAnd_rel;
+ case ARM::BI_InterlockedAnd8_nf:
+ case ARM::BI_InterlockedAnd16_nf:
+ case ARM::BI_InterlockedAnd_nf:
+ case ARM::BI_InterlockedAnd64_nf:
+ return MSVCIntrin::_InterlockedAnd_nf;
+ case ARM::BI_InterlockedIncrement16_acq:
+ case ARM::BI_InterlockedIncrement_acq:
+ case ARM::BI_InterlockedIncrement64_acq:
+ return MSVCIntrin::_InterlockedIncrement_acq;
+ case ARM::BI_InterlockedIncrement16_rel:
+ case ARM::BI_InterlockedIncrement_rel:
+ case ARM::BI_InterlockedIncrement64_rel:
+ return MSVCIntrin::_InterlockedIncrement_rel;
+ case ARM::BI_InterlockedIncrement16_nf:
+ case ARM::BI_InterlockedIncrement_nf:
+ case ARM::BI_InterlockedIncrement64_nf:
+ return MSVCIntrin::_InterlockedIncrement_nf;
+ case ARM::BI_InterlockedDecrement16_acq:
+ case ARM::BI_InterlockedDecrement_acq:
+ case ARM::BI_InterlockedDecrement64_acq:
+ return MSVCIntrin::_InterlockedDecrement_acq;
+ case ARM::BI_InterlockedDecrement16_rel:
+ case ARM::BI_InterlockedDecrement_rel:
+ case ARM::BI_InterlockedDecrement64_rel:
+ return MSVCIntrin::_InterlockedDecrement_rel;
+ case ARM::BI_InterlockedDecrement16_nf:
+ case ARM::BI_InterlockedDecrement_nf:
+ case ARM::BI_InterlockedDecrement64_nf:
+ return MSVCIntrin::_InterlockedDecrement_nf;
+ }
+ llvm_unreachable("must return from switch");
+}
+
+static Optional<CodeGenFunction::MSVCIntrin>
+translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
+ using MSVCIntrin = CodeGenFunction::MSVCIntrin;
+ switch (BuiltinID) {
+ default:
+ return None;
+ case AArch64::BI_BitScanForward:
+ case AArch64::BI_BitScanForward64:
+ return MSVCIntrin::_BitScanForward;
+ case AArch64::BI_BitScanReverse:
+ case AArch64::BI_BitScanReverse64:
+ return MSVCIntrin::_BitScanReverse;
+ case AArch64::BI_InterlockedAnd64:
+ return MSVCIntrin::_InterlockedAnd;
+ case AArch64::BI_InterlockedExchange64:
+ return MSVCIntrin::_InterlockedExchange;
+ case AArch64::BI_InterlockedExchangeAdd64:
+ return MSVCIntrin::_InterlockedExchangeAdd;
+ case AArch64::BI_InterlockedExchangeSub64:
+ return MSVCIntrin::_InterlockedExchangeSub;
+ case AArch64::BI_InterlockedOr64:
+ return MSVCIntrin::_InterlockedOr;
+ case AArch64::BI_InterlockedXor64:
+ return MSVCIntrin::_InterlockedXor;
+ case AArch64::BI_InterlockedDecrement64:
+ return MSVCIntrin::_InterlockedDecrement;
+ case AArch64::BI_InterlockedIncrement64:
+ return MSVCIntrin::_InterlockedIncrement;
+ case AArch64::BI_InterlockedExchangeAdd8_acq:
+ case AArch64::BI_InterlockedExchangeAdd16_acq:
+ case AArch64::BI_InterlockedExchangeAdd_acq:
+ case AArch64::BI_InterlockedExchangeAdd64_acq:
+ return MSVCIntrin::_InterlockedExchangeAdd_acq;
+ case AArch64::BI_InterlockedExchangeAdd8_rel:
+ case AArch64::BI_InterlockedExchangeAdd16_rel:
+ case AArch64::BI_InterlockedExchangeAdd_rel:
+ case AArch64::BI_InterlockedExchangeAdd64_rel:
+ return MSVCIntrin::_InterlockedExchangeAdd_rel;
+ case AArch64::BI_InterlockedExchangeAdd8_nf:
+ case AArch64::BI_InterlockedExchangeAdd16_nf:
+ case AArch64::BI_InterlockedExchangeAdd_nf:
+ case AArch64::BI_InterlockedExchangeAdd64_nf:
+ return MSVCIntrin::_InterlockedExchangeAdd_nf;
+ case AArch64::BI_InterlockedExchange8_acq:
+ case AArch64::BI_InterlockedExchange16_acq:
+ case AArch64::BI_InterlockedExchange_acq:
+ case AArch64::BI_InterlockedExchange64_acq:
+ return MSVCIntrin::_InterlockedExchange_acq;
+ case AArch64::BI_InterlockedExchange8_rel:
+ case AArch64::BI_InterlockedExchange16_rel:
+ case AArch64::BI_InterlockedExchange_rel:
+ case AArch64::BI_InterlockedExchange64_rel:
+ return MSVCIntrin::_InterlockedExchange_rel;
+ case AArch64::BI_InterlockedExchange8_nf:
+ case AArch64::BI_InterlockedExchange16_nf:
+ case AArch64::BI_InterlockedExchange_nf:
+ case AArch64::BI_InterlockedExchange64_nf:
+ return MSVCIntrin::_InterlockedExchange_nf;
+ case AArch64::BI_InterlockedCompareExchange8_acq:
+ case AArch64::BI_InterlockedCompareExchange16_acq:
+ case AArch64::BI_InterlockedCompareExchange_acq:
+ case AArch64::BI_InterlockedCompareExchange64_acq:
+ return MSVCIntrin::_InterlockedCompareExchange_acq;
+ case AArch64::BI_InterlockedCompareExchange8_rel:
+ case AArch64::BI_InterlockedCompareExchange16_rel:
+ case AArch64::BI_InterlockedCompareExchange_rel:
+ case AArch64::BI_InterlockedCompareExchange64_rel:
+ return MSVCIntrin::_InterlockedCompareExchange_rel;
+ case AArch64::BI_InterlockedCompareExchange8_nf:
+ case AArch64::BI_InterlockedCompareExchange16_nf:
+ case AArch64::BI_InterlockedCompareExchange_nf:
+ case AArch64::BI_InterlockedCompareExchange64_nf:
+ return MSVCIntrin::_InterlockedCompareExchange_nf;
+ case AArch64::BI_InterlockedCompareExchange128:
+ return MSVCIntrin::_InterlockedCompareExchange128;
+ case AArch64::BI_InterlockedCompareExchange128_acq:
+ return MSVCIntrin::_InterlockedCompareExchange128_acq;
+ case AArch64::BI_InterlockedCompareExchange128_nf:
+ return MSVCIntrin::_InterlockedCompareExchange128_nf;
+ case AArch64::BI_InterlockedCompareExchange128_rel:
+ return MSVCIntrin::_InterlockedCompareExchange128_rel;
+ case AArch64::BI_InterlockedOr8_acq:
+ case AArch64::BI_InterlockedOr16_acq:
+ case AArch64::BI_InterlockedOr_acq:
+ case AArch64::BI_InterlockedOr64_acq:
+ return MSVCIntrin::_InterlockedOr_acq;
+ case AArch64::BI_InterlockedOr8_rel:
+ case AArch64::BI_InterlockedOr16_rel:
+ case AArch64::BI_InterlockedOr_rel:
+ case AArch64::BI_InterlockedOr64_rel:
+ return MSVCIntrin::_InterlockedOr_rel;
+ case AArch64::BI_InterlockedOr8_nf:
+ case AArch64::BI_InterlockedOr16_nf:
+ case AArch64::BI_InterlockedOr_nf:
+ case AArch64::BI_InterlockedOr64_nf:
+ return MSVCIntrin::_InterlockedOr_nf;
+ case AArch64::BI_InterlockedXor8_acq:
+ case AArch64::BI_InterlockedXor16_acq:
+ case AArch64::BI_InterlockedXor_acq:
+ case AArch64::BI_InterlockedXor64_acq:
+ return MSVCIntrin::_InterlockedXor_acq;
+ case AArch64::BI_InterlockedXor8_rel:
+ case AArch64::BI_InterlockedXor16_rel:
+ case AArch64::BI_InterlockedXor_rel:
+ case AArch64::BI_InterlockedXor64_rel:
+ return MSVCIntrin::_InterlockedXor_rel;
+ case AArch64::BI_InterlockedXor8_nf:
+ case AArch64::BI_InterlockedXor16_nf:
+ case AArch64::BI_InterlockedXor_nf:
+ case AArch64::BI_InterlockedXor64_nf:
+ return MSVCIntrin::_InterlockedXor_nf;
+ case AArch64::BI_InterlockedAnd8_acq:
+ case AArch64::BI_InterlockedAnd16_acq:
+ case AArch64::BI_InterlockedAnd_acq:
+ case AArch64::BI_InterlockedAnd64_acq:
+ return MSVCIntrin::_InterlockedAnd_acq;
+ case AArch64::BI_InterlockedAnd8_rel:
+ case AArch64::BI_InterlockedAnd16_rel:
+ case AArch64::BI_InterlockedAnd_rel:
+ case AArch64::BI_InterlockedAnd64_rel:
+ return MSVCIntrin::_InterlockedAnd_rel;
+ case AArch64::BI_InterlockedAnd8_nf:
+ case AArch64::BI_InterlockedAnd16_nf:
+ case AArch64::BI_InterlockedAnd_nf:
+ case AArch64::BI_InterlockedAnd64_nf:
+ return MSVCIntrin::_InterlockedAnd_nf;
+ case AArch64::BI_InterlockedIncrement16_acq:
+ case AArch64::BI_InterlockedIncrement_acq:
+ case AArch64::BI_InterlockedIncrement64_acq:
+ return MSVCIntrin::_InterlockedIncrement_acq;
+ case AArch64::BI_InterlockedIncrement16_rel:
+ case AArch64::BI_InterlockedIncrement_rel:
+ case AArch64::BI_InterlockedIncrement64_rel:
+ return MSVCIntrin::_InterlockedIncrement_rel;
+ case AArch64::BI_InterlockedIncrement16_nf:
+ case AArch64::BI_InterlockedIncrement_nf:
+ case AArch64::BI_InterlockedIncrement64_nf:
+ return MSVCIntrin::_InterlockedIncrement_nf;
+ case AArch64::BI_InterlockedDecrement16_acq:
+ case AArch64::BI_InterlockedDecrement_acq:
+ case AArch64::BI_InterlockedDecrement64_acq:
+ return MSVCIntrin::_InterlockedDecrement_acq;
+ case AArch64::BI_InterlockedDecrement16_rel:
+ case AArch64::BI_InterlockedDecrement_rel:
+ case AArch64::BI_InterlockedDecrement64_rel:
+ return MSVCIntrin::_InterlockedDecrement_rel;
+ case AArch64::BI_InterlockedDecrement16_nf:
+ case AArch64::BI_InterlockedDecrement_nf:
+ case AArch64::BI_InterlockedDecrement64_nf:
+ return MSVCIntrin::_InterlockedDecrement_nf;
+ }
+ llvm_unreachable("must return from switch");
+}
+
+static Optional<CodeGenFunction::MSVCIntrin>
+translateX86ToMsvcIntrin(unsigned BuiltinID) {
+ using MSVCIntrin = CodeGenFunction::MSVCIntrin;
+ switch (BuiltinID) {
+ default:
+ return None;
+ case clang::X86::BI_BitScanForward:
+ case clang::X86::BI_BitScanForward64:
+ return MSVCIntrin::_BitScanForward;
+ case clang::X86::BI_BitScanReverse:
+ case clang::X86::BI_BitScanReverse64:
+ return MSVCIntrin::_BitScanReverse;
+ case clang::X86::BI_InterlockedAnd64:
+ return MSVCIntrin::_InterlockedAnd;
+ case clang::X86::BI_InterlockedCompareExchange128:
+ return MSVCIntrin::_InterlockedCompareExchange128;
+ case clang::X86::BI_InterlockedExchange64:
+ return MSVCIntrin::_InterlockedExchange;
+ case clang::X86::BI_InterlockedExchangeAdd64:
+ return MSVCIntrin::_InterlockedExchangeAdd;
+ case clang::X86::BI_InterlockedExchangeSub64:
+ return MSVCIntrin::_InterlockedExchangeSub;
+ case clang::X86::BI_InterlockedOr64:
+ return MSVCIntrin::_InterlockedOr;
+ case clang::X86::BI_InterlockedXor64:
+ return MSVCIntrin::_InterlockedXor;
+ case clang::X86::BI_InterlockedDecrement64:
+ return MSVCIntrin::_InterlockedDecrement;
+ case clang::X86::BI_InterlockedIncrement64:
+ return MSVCIntrin::_InterlockedIncrement;
+ }
+ llvm_unreachable("must return from switch");
+}
+
+// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case MSVCIntrin::_BitScanForward:
case MSVCIntrin::_BitScanReverse: {
+ Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
Value *ArgValue = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = ArgValue->getType();
llvm::Type *IndexType =
- EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
+ IndexAddress.getPointer()->getType()->getPointerElementType();
llvm::Type *ResultType = ConvertType(E->getType());
Value *ArgZero = llvm::Constant::getNullValue(ArgType);
@@ -1033,7 +1449,6 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
Result->addIncoming(ResZero, Begin);
Builder.SetInsertPoint(NotZero);
- Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
if (BuiltinID == MSVCIntrin::_BitScanForward) {
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
@@ -1092,6 +1507,15 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
case MSVCIntrin::_InterlockedCompareExchange_nf:
return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedCompareExchange128:
+ return EmitAtomicCmpXchg128ForMSIntrin(
+ *this, E, AtomicOrdering::SequentiallyConsistent);
+ case MSVCIntrin::_InterlockedCompareExchange128_acq:
+ return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedCompareExchange128_rel:
+ return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedCompareExchange128_nf:
+ return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedOr_acq:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
AtomicOrdering::Acquire);
@@ -1408,6 +1832,47 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
return RValue::get(BufAddr.getPointer());
}
+static bool isSpecialUnsignedMultiplySignedResult(
+ unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
+ WidthAndSignedness ResultInfo) {
+ return BuiltinID == Builtin::BI__builtin_mul_overflow &&
+ Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
+ !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
+}
+
+static RValue EmitCheckedUnsignedMultiplySignedResult(
+ CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
+ const clang::Expr *Op2, WidthAndSignedness Op2Info,
+ const clang::Expr *ResultArg, QualType ResultQTy,
+ WidthAndSignedness ResultInfo) {
+ assert(isSpecialUnsignedMultiplySignedResult(
+ Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
+ "Cannot specialize this multiply");
+
+ llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
+ llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
+
+ llvm::Value *HasOverflow;
+ llvm::Value *Result = EmitOverflowIntrinsic(
+ CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
+
+  // The intrinsic call will detect overflow when the value is > UINT_MAX.
+  // However, since the original builtin had a signed result, we need to
+  // report an overflow when the result is greater than INT_MAX.
+ auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
+ llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
+
+ llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
+ HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
+
+ bool isVolatile =
+ ResultArg->getType()->getPointeeType().isVolatileQualified();
+ Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
+ CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
+ isVolatile);
+ return RValue::get(HasOverflow);
+}
+
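This covers the one case the generic mixed-sign path cannot: both operands unsigned, the result signed, and all three the same width. Overflow must then be reported both when the unsigned product wraps and when it lands above INT_MAX. A hypothetical call taking this path:

bool mulToSigned(unsigned a, unsigned b, int &out) {
  return __builtin_mul_overflow(a, b, &out); // true on overflow
}
// e.g. a = b = 0x10000:         the 32-bit product wraps to 0 -> true;
//      a = 0x10000, b = 0x8000: product 0x80000000 > INT_MAX  -> also true.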
/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
WidthAndSignedness Op1Info,
@@ -1642,6 +2107,78 @@ RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}
+// Map math builtins for long-double to f128 version.
+static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
+ switch (BuiltinID) {
+#define MUTATE_LDBL(func) \
+ case Builtin::BI__builtin_##func##l: \
+ return Builtin::BI__builtin_##func##f128;
+ MUTATE_LDBL(sqrt)
+ MUTATE_LDBL(cbrt)
+ MUTATE_LDBL(fabs)
+ MUTATE_LDBL(log)
+ MUTATE_LDBL(log2)
+ MUTATE_LDBL(log10)
+ MUTATE_LDBL(log1p)
+ MUTATE_LDBL(logb)
+ MUTATE_LDBL(exp)
+ MUTATE_LDBL(exp2)
+ MUTATE_LDBL(expm1)
+ MUTATE_LDBL(fdim)
+ MUTATE_LDBL(hypot)
+ MUTATE_LDBL(ilogb)
+ MUTATE_LDBL(pow)
+ MUTATE_LDBL(fmin)
+ MUTATE_LDBL(fmax)
+ MUTATE_LDBL(ceil)
+ MUTATE_LDBL(trunc)
+ MUTATE_LDBL(rint)
+ MUTATE_LDBL(nearbyint)
+ MUTATE_LDBL(round)
+ MUTATE_LDBL(floor)
+ MUTATE_LDBL(lround)
+ MUTATE_LDBL(llround)
+ MUTATE_LDBL(lrint)
+ MUTATE_LDBL(llrint)
+ MUTATE_LDBL(fmod)
+ MUTATE_LDBL(modf)
+ MUTATE_LDBL(nan)
+ MUTATE_LDBL(nans)
+ MUTATE_LDBL(inf)
+ MUTATE_LDBL(fma)
+ MUTATE_LDBL(sin)
+ MUTATE_LDBL(cos)
+ MUTATE_LDBL(tan)
+ MUTATE_LDBL(sinh)
+ MUTATE_LDBL(cosh)
+ MUTATE_LDBL(tanh)
+ MUTATE_LDBL(asin)
+ MUTATE_LDBL(acos)
+ MUTATE_LDBL(atan)
+ MUTATE_LDBL(asinh)
+ MUTATE_LDBL(acosh)
+ MUTATE_LDBL(atanh)
+ MUTATE_LDBL(atan2)
+ MUTATE_LDBL(erf)
+ MUTATE_LDBL(erfc)
+ MUTATE_LDBL(ldexp)
+ MUTATE_LDBL(frexp)
+ MUTATE_LDBL(huge_val)
+ MUTATE_LDBL(copysign)
+ MUTATE_LDBL(nextafter)
+ MUTATE_LDBL(nexttoward)
+ MUTATE_LDBL(remainder)
+ MUTATE_LDBL(remquo)
+ MUTATE_LDBL(scalbln)
+ MUTATE_LDBL(scalbn)
+ MUTATE_LDBL(tgamma)
+ MUTATE_LDBL(lgamma)
+#undef MUTATE_LDBL
+ default:
+ return BuiltinID;
+ }
+}
+
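Under IEEE-quad long double, the 'l' builtins are handled as their f128 twins. A hypothetical PowerPC example (target and ABI flag assumed):

// clang --target=powerpc64le-linux-gnu -mabi=ieeelongdouble -c example.cpp
long double quadSqrt(long double x) {
  return __builtin_sqrtl(x); // treated as if __builtin_sqrtf128 were written
}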
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
@@ -1658,13 +2195,28 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Result.Val.getFloat()));
}
+  // If the current long-double semantics are IEEE 128-bit, replace math
+  // builtins of long-double with their f128 equivalents.
+  // TODO: This mutation should also be applied to targets other than PPC,
+  // once the backend supports IEEE 128-bit style libcalls.
+ if (getTarget().getTriple().isPPC64() &&
+ &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
+ BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
+
+ // If the builtin has been declared explicitly with an assembler label,
+ // disable the specialized emitting below. Ideally we should communicate the
+ // rename in IR, or at least avoid generating the intrinsic calls that are
+ // likely to get lowered to the renamed library functions.
+ const unsigned BuiltinIDIfNoAsmLabel =
+ FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
+
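A hypothetical declaration that now takes the conservative path (the renamed symbol is invented):

extern "C" double sqrt(double) __asm__("vendor_sqrt");
double root(double x) {
  return sqrt(x); // stays a plain call to vendor_sqrt; the specialized
                  // intrinsic lowering below is skipped for this builtin
}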
  // There are LLVM math intrinsics/instructions corresponding to math library
  // functions, except that the LLVM op will never set errno while the math
  // library might. Also, math builtins have the same semantics as their math
  // library twins. Thus, we can transform math library and builtin calls to
  // their LLVM counterparts if the call is marked 'const' (known to never set
  // errno).
if (FD->hasAttr<ConstAttr>()) {
- switch (BuiltinID) {
+ switch (BuiltinIDIfNoAsmLabel) {
case Builtin::BIceil:
case Builtin::BIceilf:
case Builtin::BIceill:
@@ -1672,6 +2224,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ceilf:
case Builtin::BI__builtin_ceilf16:
case Builtin::BI__builtin_ceill:
+ case Builtin::BI__builtin_ceilf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::ceil,
Intrinsic::experimental_constrained_ceil));
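A sketch of what this fast path buys for the ceil family, assuming math errno is disabled (which is what gives the declaration ConstAttr):

// Built with -fno-math-errno, the body lowers to a single @llvm.ceil.f64
// call (or the constrained variant under strict FP) with no libm call.
double up(double x) { return __builtin_ceil(x); }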
@@ -1693,6 +2246,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_cosf:
case Builtin::BI__builtin_cosf16:
case Builtin::BI__builtin_cosl:
+ case Builtin::BI__builtin_cosf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::cos,
Intrinsic::experimental_constrained_cos));
@@ -1704,6 +2258,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_expf:
case Builtin::BI__builtin_expf16:
case Builtin::BI__builtin_expl:
+ case Builtin::BI__builtin_expf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp,
Intrinsic::experimental_constrained_exp));
@@ -1715,6 +2270,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_exp2f:
case Builtin::BI__builtin_exp2f16:
case Builtin::BI__builtin_exp2l:
+ case Builtin::BI__builtin_exp2f128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp2,
Intrinsic::experimental_constrained_exp2));
@@ -1736,6 +2292,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_floorf:
case Builtin::BI__builtin_floorf16:
case Builtin::BI__builtin_floorl:
+ case Builtin::BI__builtin_floorf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::floor,
Intrinsic::experimental_constrained_floor));
@@ -1747,6 +2304,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmaf16:
case Builtin::BI__builtin_fmal:
+ case Builtin::BI__builtin_fmaf128:
return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::fma,
Intrinsic::experimental_constrained_fma));
@@ -1758,6 +2316,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaxf:
case Builtin::BI__builtin_fmaxf16:
case Builtin::BI__builtin_fmaxl:
+ case Builtin::BI__builtin_fmaxf128:
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::maxnum,
Intrinsic::experimental_constrained_maxnum));
@@ -1769,6 +2328,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fminf:
case Builtin::BI__builtin_fminf16:
case Builtin::BI__builtin_fminl:
+ case Builtin::BI__builtin_fminf128:
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::minnum,
Intrinsic::experimental_constrained_minnum));
@@ -1781,7 +2341,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmod:
case Builtin::BI__builtin_fmodf:
case Builtin::BI__builtin_fmodf16:
- case Builtin::BI__builtin_fmodl: {
+ case Builtin::BI__builtin_fmodl:
+ case Builtin::BI__builtin_fmodf128: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *Arg1 = EmitScalarExpr(E->getArg(0));
Value *Arg2 = EmitScalarExpr(E->getArg(1));
return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
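fmod is unusual here: it lowers to a plain frem instruction rather than an intrinsic, and the new CGFPOptionsRAII scopes the call's FP options around that emission. A sketch, assuming Clang's fp pragma reaches the builtin:

float rem(float a, float b) {
#pragma clang fp contract(off)
  // The frem emitted for this call is created under the pragma's FP state.
  return __builtin_fmodf(a, b);
}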
@@ -1794,6 +2356,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_logf:
case Builtin::BI__builtin_logf16:
case Builtin::BI__builtin_logl:
+ case Builtin::BI__builtin_logf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log,
Intrinsic::experimental_constrained_log));
@@ -1805,6 +2368,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log10f:
case Builtin::BI__builtin_log10f16:
case Builtin::BI__builtin_log10l:
+ case Builtin::BI__builtin_log10f128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log10,
Intrinsic::experimental_constrained_log10));
@@ -1816,6 +2380,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log2f:
case Builtin::BI__builtin_log2f16:
case Builtin::BI__builtin_log2l:
+ case Builtin::BI__builtin_log2f128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log2,
Intrinsic::experimental_constrained_log2));
@@ -1826,6 +2391,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_nearbyint:
case Builtin::BI__builtin_nearbyintf:
case Builtin::BI__builtin_nearbyintl:
+ case Builtin::BI__builtin_nearbyintf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::nearbyint,
Intrinsic::experimental_constrained_nearbyint));
@@ -1837,6 +2403,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_powf:
case Builtin::BI__builtin_powf16:
case Builtin::BI__builtin_powl:
+ case Builtin::BI__builtin_powf128:
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::pow,
Intrinsic::experimental_constrained_pow));
@@ -1848,6 +2415,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_rintf:
case Builtin::BI__builtin_rintf16:
case Builtin::BI__builtin_rintl:
+ case Builtin::BI__builtin_rintf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::rint,
Intrinsic::experimental_constrained_rint));
@@ -1859,6 +2427,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_roundf:
case Builtin::BI__builtin_roundf16:
case Builtin::BI__builtin_roundl:
+ case Builtin::BI__builtin_roundf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::round,
Intrinsic::experimental_constrained_round));
@@ -1870,6 +2439,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sinf:
case Builtin::BI__builtin_sinf16:
case Builtin::BI__builtin_sinl:
+ case Builtin::BI__builtin_sinf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sin,
Intrinsic::experimental_constrained_sin));
@@ -1881,6 +2451,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sqrtf:
case Builtin::BI__builtin_sqrtf16:
case Builtin::BI__builtin_sqrtl:
+ case Builtin::BI__builtin_sqrtf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sqrt,
Intrinsic::experimental_constrained_sqrt));
@@ -1892,6 +2463,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_truncf:
case Builtin::BI__builtin_truncf16:
case Builtin::BI__builtin_truncl:
+ case Builtin::BI__builtin_truncf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::trunc,
Intrinsic::experimental_constrained_trunc));
@@ -1902,6 +2474,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_lround:
case Builtin::BI__builtin_lroundf:
case Builtin::BI__builtin_lroundl:
+ case Builtin::BI__builtin_lroundf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::lround,
Intrinsic::experimental_constrained_lround));
@@ -1912,6 +2485,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_llround:
case Builtin::BI__builtin_llroundf:
case Builtin::BI__builtin_llroundl:
+ case Builtin::BI__builtin_llroundf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::llround,
Intrinsic::experimental_constrained_llround));
@@ -1922,6 +2496,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_lrint:
case Builtin::BI__builtin_lrintf:
case Builtin::BI__builtin_lrintl:
+ case Builtin::BI__builtin_lrintf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::lrint,
Intrinsic::experimental_constrained_lrint));
@@ -1932,6 +2507,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_llrint:
case Builtin::BI__builtin_llrintf:
case Builtin::BI__builtin_llrintl:
+ case Builtin::BI__builtin_llrintf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::llrint,
Intrinsic::experimental_constrained_llrint));
@@ -1941,7 +2517,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
}
- switch (BuiltinID) {
+ switch (BuiltinIDIfNoAsmLabel) {
default: break;
case Builtin::BI__builtin___CFStringMakeConstantString:
case Builtin::BI__builtin___NSStringMakeConstantString:
@@ -1978,6 +2554,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_complex: {
+ Value *Real = EmitScalarExpr(E->getArg(0));
+ Value *Imag = EmitScalarExpr(E->getArg(1));
+ return RValue::getComplex({Real, Imag});
+ }
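Usage sketch for the new builtin: it assembles a complex value directly from its parts, which, unlike the usual re + im * I arithmetic, stays exact for infinities, NaNs, and signed zeros:

// The imaginary part is exactly -0.0; no floating-point arithmetic runs.
double _Complex z = __builtin_complex(0.0, -0.0);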
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
case Builtin::BI__builtin_conjl:
@@ -2373,6 +2954,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_isunordered: {
// Ordered comparisons: we know the arguments to these are matching scalar
// floating point values.
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
@@ -2401,6 +2984,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isnan: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(0));
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
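The lowering leans on IEEE 754: a value compares unordered with itself exactly when it is NaN, so the whole test is one fcmp. A source-level equivalent:

inline bool my_isnan(double v) { return v != v; } // true only for NaN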
@@ -2464,6 +3049,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// isinf(x) --> fabs(x) == infinity
// isfinite(x) --> fabs(x) != infinity
// x != NaN via the ordered compare in either case.
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(0));
Value *Fabs = EmitFAbs(*this, V);
Constant *Infinity = ConstantFP::getInfinity(V->getType());
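A portable analogue of the fabs-against-infinity trick; the ordered comparisons reject NaN just as the emitted fcmp forms do:

#include <cmath>
inline bool my_isinf(double x)    { return std::fabs(x) == INFINITY; }
inline bool my_isfinite(double x) { return std::fabs(x) <  INFINITY; }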
@@ -2476,6 +3063,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_isinf_sign: {
// isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *Arg = EmitScalarExpr(E->getArg(0));
Value *AbsArg = EmitFAbs(*this, Arg);
Value *IsInf = Builder.CreateFCmpOEQ(
@@ -2493,6 +3082,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_isnormal: {
// isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
@@ -2521,6 +3112,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_fpclassify: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(5));
llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
@@ -3386,7 +3979,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// non-wide string literal, potentially cast, so the cast<> is safe.
const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
- return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
+ return RValue::get(
+ EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
}
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
@@ -3479,6 +4073,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
RightInfo, ResultArg, ResultQTy,
ResultInfo);
+ if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
+ ResultInfo))
+ return EmitCheckedUnsignedMultiplySignedResult(
+ *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
+ ResultInfo);
+
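Usage sketch for the newly special-cased shape, unsigned operands with a signed result:

bool mul_to_signed(unsigned long a, unsigned long b, long *out) {
  // Overflow is reported against the signed type of *out.
  return __builtin_mul_overflow(a, b, out);
}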
WidthAndSignedness EncompassingInfo =
EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
@@ -3754,11 +4354,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI_abnormal_termination:
return RValue::get(EmitSEHAbnormalTermination());
case Builtin::BI_setjmpex:
- if (getTarget().getTriple().isOSMSVCRT())
+ if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
+ E->getArg(0)->getType()->isPointerType())
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
break;
case Builtin::BI_setjmp:
- if (getTarget().getTriple().isOSMSVCRT()) {
+ if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
+ E->getArg(0)->getType()->isPointerType()) {
if (getTarget().getTriple().getArch() == llvm::Triple::x86)
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
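The added arity and parameter-type checks keep nonstandard user declarations out of the MSVC setjmp lowering; a hypothetical offender that now falls through to an ordinary call:

// Hypothetical ill-formed redeclaration: wrong parameter type.
int _setjmp(int not_a_jmp_buf);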
@@ -3838,8 +4440,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, BCast, PacketSize, PacketAlign}));
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, BCast, PacketSize, PacketAlign}));
} else {
assert(4 == E->getNumArgs() &&
"Illegal number of parameters to pipe function");
@@ -3857,9 +4459,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// it to i32.
if (Arg2->getType() != Int32Ty)
Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
+ return RValue::get(
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
}
}
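The mechanical change running through these pipe builtins swaps raw IRBuilder calls for CodeGenFunction::EmitRuntimeCall, which also stamps the runtime calling convention onto each call site. In sketch form, assuming a CodeGenFunction context:

// Before: Builder.CreateCall(Callee, Args) left the default convention.
// After: the helper copies the runtime convention onto the call.
llvm::CallInst *Call = EmitRuntimeCall(Callee, Args);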
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
@@ -3900,9 +4502,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// it to i32.
if (Arg1->getType() != Int32Ty)
Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, Arg1, PacketSize, PacketAlign}));
+ return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
// functions
@@ -3938,9 +4539,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, Arg1, PacketSize, PacketAlign}));
+ return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
case Builtin::BIget_pipe_num_packets:
@@ -3963,8 +4563,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, PacketSize, PacketAlign}));
+ return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
@@ -3986,7 +4586,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
auto NewCall =
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
ConvertType(E->getType())));
}
@@ -4029,8 +4629,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
auto RTCall =
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
- {Queue, Flags, Range, Kernel, Block});
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
+ {Queue, Flags, Range, Kernel, Block});
RTCall->setAttributes(ByValAttrSet);
return RValue::get(RTCall);
}
@@ -4089,7 +4689,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
if (TmpSize)
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
@@ -4147,8 +4747,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
+ llvm::ArrayRef<llvm::Value *>(Args)));
}
// Has event info and variadics
// Pass the number of variadics to the runtime function too.
@@ -4164,8 +4764,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
auto Call =
- RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
+ llvm::ArrayRef<llvm::Value *>(Args)));
if (TmpSize)
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
@@ -4181,7 +4781,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- return RValue::get(Builder.CreateCall(
+ return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
false),
@@ -4195,7 +4795,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- return RValue::get(Builder.CreateCall(
+ return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
false),
@@ -4216,7 +4816,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
? "__get_kernel_max_sub_group_size_for_ndrange_impl"
: "__get_kernel_sub_group_count_for_ndrange_impl";
- return RValue::get(Builder.CreateCall(
+ return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(
IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
@@ -4419,11 +5019,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
} else {
// If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
- assert(IsConst && "Constant arg isn't actually constant?");
- (void)IsConst;
- ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
+ ArgValue = llvm::ConstantInt::get(
+ getLLVMContext(),
+ *E->getArg(i)->getIntegerConstantExpr(getContext()));
}
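The same API migration recurs throughout this patch; in sketch form, given an Expr *E and ASTContext &Ctx:

// Old: out-parameter plus bool, asserted by hand.
//   llvm::APSInt V;
//   bool OK = E->isIntegerConstantExpr(V, Ctx);
// New: an Optional folds the two into one checked value.
Optional<llvm::APSInt> V = E->getIntegerConstantExpr(Ctx);
assert(V && "Sema already checked this was a constant");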
// If the intrinsic arg type is different from the builtin arg type
@@ -4523,6 +5121,7 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
case llvm::Triple::x86_64:
return CGF->EmitX86BuiltinExpr(BuiltinID, E);
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
@@ -4558,11 +5157,11 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
getTarget().getTriple().getArch());
}
-static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
- NeonTypeFlags TypeFlags,
- bool HasLegalHalfType = true,
- bool V1Ty = false,
- bool AllowBFloatArgsAndRet = true) {
+static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags TypeFlags,
+ bool HasLegalHalfType = true,
+ bool V1Ty = false,
+ bool AllowBFloatArgsAndRet = true) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
case NeonTypeFlags::Int8:
@@ -5075,6 +5674,14 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
+ NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
@@ -5248,6 +5855,8 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
@@ -5265,6 +5874,8 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
@@ -5423,6 +6034,10 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
@@ -5583,8 +6198,8 @@ static Value *EmitCommonNeonSISDBuiltinExpr(
Value *Result = CGF.EmitNeonCall(F, Ops, s);
llvm::Type *ResultType = CGF.ConvertType(E->getType());
- if (ResultType->getPrimitiveSizeInBits() <
- Result->getType()->getPrimitiveSizeInBits())
+ if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
+ Result->getType()->getPrimitiveSizeInBits().getFixedSize())
return CGF.Builder.CreateExtractElement(Result, C0);
return CGF.Builder.CreateBitCast(Result, ResultType, s);
@@ -5596,21 +6211,22 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
llvm::Triple::ArchType Arch) {
// Get the last argument, which specifies the vector type.
- llvm::APSInt NeonTypeConst;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
- if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
+ Optional<llvm::APSInt> NeonTypeConst =
+ Arg->getIntegerConstantExpr(getContext());
+ if (!NeonTypeConst)
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
- NeonTypeFlags Type(NeonTypeConst.getZExtValue());
+ NeonTypeFlags Type(NeonTypeConst->getZExtValue());
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
const bool HasLegalHalfType = getTarget().hasLegalHalfType();
const bool AllowBFloatArgsAndRet =
getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
- llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType, false,
- AllowBFloatArgsAndRet);
+ llvm::FixedVectorType *VTy =
+ GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -5633,7 +6249,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
NumElements = NumElements * 2;
if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
- NumElements = NumElements / 2;
+ NumElements = NumElements.divideCoefficientBy(2);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
@@ -5651,8 +6267,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
case NEON::BI__builtin_neon_vaddhn_v: {
- llvm::VectorType *SrcTy =
- llvm::VectorType::getExtendedElementVectorType(VTy);
+ llvm::FixedVectorType *SrcTy =
+ llvm::FixedVectorType::getExtendedElementVectorType(VTy);
// %sum = add <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
@@ -5924,14 +6540,16 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
- llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ llvm::FixedVectorType *DTy =
+ llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
if (Usgn)
return Builder.CreateZExt(Ops[0], Ty, "vmovl");
return Builder.CreateSExt(Ops[0], Ty, "vmovl");
}
case NEON::BI__builtin_neon_vmovn_v: {
- llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ llvm::FixedVectorType *QTy =
+ llvm::FixedVectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
@@ -5977,7 +6595,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vqdmulh_lane_v:
case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
- auto *RTy = cast<llvm::VectorType>(Ty);
+ auto *RTy = cast<llvm::FixedVectorType>(Ty);
if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
RTy = llvm::FixedVectorType::get(RTy->getElementType(),
@@ -6026,7 +6644,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
"vshl_n");
case NEON::BI__builtin_neon_vshll_n_v: {
- llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+ llvm::FixedVectorType *SrcTy =
+ llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
if (Usgn)
Ops[0] = Builder.CreateZExt(Ops[0], VTy);
@@ -6036,7 +6655,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
}
case NEON::BI__builtin_neon_vshrn_n_v: {
- llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ llvm::FixedVectorType *SrcTy =
+ llvm::FixedVectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
if (Usgn)
@@ -6085,8 +6705,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
- llvm::VectorType *SrcTy =
- llvm::VectorType::getExtendedElementVectorType(VTy);
+ llvm::FixedVectorType *SrcTy =
+ llvm::FixedVectorType::getExtendedElementVectorType(VTy);
// %sum = add <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
@@ -6225,28 +6845,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vbfdot_v:
case NEON::BI__builtin_neon_vbfdotq_v: {
llvm::Type *InputTy =
- llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
}
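The substance of this hunk is the operand type: the dot-product input vector is now modeled with its real element type. For the 64-bit variant, in isolation:

// Before: <8 x i8>; after: <4 x bfloat> for a 64-bit input vector.
llvm::Type *InputTy = llvm::FixedVectorType::get(BFloatTy, 64 / 16);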
- case NEON::BI__builtin_neon_vbfmmlaq_v: {
- llvm::Type *InputTy =
- llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmmla");
- }
- case NEON::BI__builtin_neon_vbfmlalbq_v: {
- llvm::Type *InputTy =
- llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalb");
- }
- case NEON::BI__builtin_neon_vbfmlaltq_v: {
- llvm::Type *InputTy =
- llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalt");
- }
case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
llvm::Type *Tys[1] = { Ty };
Function *F = CGM.getIntrinsic(Int, Tys);
@@ -6298,7 +6900,7 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
// Build a vector containing sequential numbers like (0, 1, 2, ..., 15)
SmallVector<int, 16> Indices;
- llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
Indices.push_back(2*i);
Indices.push_back(2*i+1);
@@ -6810,6 +7412,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
AccessKind);
}
+ // Handle MSVC intrinsics before argument evaluation to prevent double
+ // evaluation.
+ if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
+ return EmitMSVCBuiltinExpr(*MsvcIntId, E);
+
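Why the placement matters: before this hoist, an argument expression could be emitted once by the shared argument loop below and again inside EmitMSVCBuiltinExpr. A sketch of code whose side effect was at risk of running twice:

long bump(long volatile *p) {
  // With the early dispatch, p++ is evaluated exactly once.
  return _InterlockedIncrement(p++);
}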
// Deal with MVE builtins
if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
return Result;
@@ -6898,10 +7505,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
- assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
- Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ Ops.push_back(llvm::ConstantInt::get(
+ getLLVMContext(),
+ *E->getArg(i)->getIntegerConstantExpr(getContext())));
}
}
@@ -6971,150 +7577,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
- case ARM::BI_BitScanForward:
- case ARM::BI_BitScanForward64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
- case ARM::BI_BitScanReverse:
- case ARM::BI_BitScanReverse64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
-
- case ARM::BI_InterlockedAnd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
- case ARM::BI_InterlockedExchange64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
- case ARM::BI_InterlockedExchangeAdd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
- case ARM::BI_InterlockedExchangeSub64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
- case ARM::BI_InterlockedOr64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
- case ARM::BI_InterlockedXor64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
- case ARM::BI_InterlockedDecrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
- case ARM::BI_InterlockedIncrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
- case ARM::BI_InterlockedExchangeAdd8_acq:
- case ARM::BI_InterlockedExchangeAdd16_acq:
- case ARM::BI_InterlockedExchangeAdd_acq:
- case ARM::BI_InterlockedExchangeAdd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
- case ARM::BI_InterlockedExchangeAdd8_rel:
- case ARM::BI_InterlockedExchangeAdd16_rel:
- case ARM::BI_InterlockedExchangeAdd_rel:
- case ARM::BI_InterlockedExchangeAdd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
- case ARM::BI_InterlockedExchangeAdd8_nf:
- case ARM::BI_InterlockedExchangeAdd16_nf:
- case ARM::BI_InterlockedExchangeAdd_nf:
- case ARM::BI_InterlockedExchangeAdd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
- case ARM::BI_InterlockedExchange8_acq:
- case ARM::BI_InterlockedExchange16_acq:
- case ARM::BI_InterlockedExchange_acq:
- case ARM::BI_InterlockedExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
- case ARM::BI_InterlockedExchange8_rel:
- case ARM::BI_InterlockedExchange16_rel:
- case ARM::BI_InterlockedExchange_rel:
- case ARM::BI_InterlockedExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
- case ARM::BI_InterlockedExchange8_nf:
- case ARM::BI_InterlockedExchange16_nf:
- case ARM::BI_InterlockedExchange_nf:
- case ARM::BI_InterlockedExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
- case ARM::BI_InterlockedCompareExchange8_acq:
- case ARM::BI_InterlockedCompareExchange16_acq:
- case ARM::BI_InterlockedCompareExchange_acq:
- case ARM::BI_InterlockedCompareExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
- case ARM::BI_InterlockedCompareExchange8_rel:
- case ARM::BI_InterlockedCompareExchange16_rel:
- case ARM::BI_InterlockedCompareExchange_rel:
- case ARM::BI_InterlockedCompareExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
- case ARM::BI_InterlockedCompareExchange8_nf:
- case ARM::BI_InterlockedCompareExchange16_nf:
- case ARM::BI_InterlockedCompareExchange_nf:
- case ARM::BI_InterlockedCompareExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
- case ARM::BI_InterlockedOr8_acq:
- case ARM::BI_InterlockedOr16_acq:
- case ARM::BI_InterlockedOr_acq:
- case ARM::BI_InterlockedOr64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
- case ARM::BI_InterlockedOr8_rel:
- case ARM::BI_InterlockedOr16_rel:
- case ARM::BI_InterlockedOr_rel:
- case ARM::BI_InterlockedOr64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
- case ARM::BI_InterlockedOr8_nf:
- case ARM::BI_InterlockedOr16_nf:
- case ARM::BI_InterlockedOr_nf:
- case ARM::BI_InterlockedOr64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
- case ARM::BI_InterlockedXor8_acq:
- case ARM::BI_InterlockedXor16_acq:
- case ARM::BI_InterlockedXor_acq:
- case ARM::BI_InterlockedXor64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
- case ARM::BI_InterlockedXor8_rel:
- case ARM::BI_InterlockedXor16_rel:
- case ARM::BI_InterlockedXor_rel:
- case ARM::BI_InterlockedXor64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
- case ARM::BI_InterlockedXor8_nf:
- case ARM::BI_InterlockedXor16_nf:
- case ARM::BI_InterlockedXor_nf:
- case ARM::BI_InterlockedXor64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
- case ARM::BI_InterlockedAnd8_acq:
- case ARM::BI_InterlockedAnd16_acq:
- case ARM::BI_InterlockedAnd_acq:
- case ARM::BI_InterlockedAnd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
- case ARM::BI_InterlockedAnd8_rel:
- case ARM::BI_InterlockedAnd16_rel:
- case ARM::BI_InterlockedAnd_rel:
- case ARM::BI_InterlockedAnd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
- case ARM::BI_InterlockedAnd8_nf:
- case ARM::BI_InterlockedAnd16_nf:
- case ARM::BI_InterlockedAnd_nf:
- case ARM::BI_InterlockedAnd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
- case ARM::BI_InterlockedIncrement16_acq:
- case ARM::BI_InterlockedIncrement_acq:
- case ARM::BI_InterlockedIncrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
- case ARM::BI_InterlockedIncrement16_rel:
- case ARM::BI_InterlockedIncrement_rel:
- case ARM::BI_InterlockedIncrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
- case ARM::BI_InterlockedIncrement16_nf:
- case ARM::BI_InterlockedIncrement_nf:
- case ARM::BI_InterlockedIncrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
- case ARM::BI_InterlockedDecrement16_acq:
- case ARM::BI_InterlockedDecrement_acq:
- case ARM::BI_InterlockedDecrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
- case ARM::BI_InterlockedDecrement16_rel:
- case ARM::BI_InterlockedDecrement_rel:
- case ARM::BI_InterlockedDecrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
- case ARM::BI_InterlockedDecrement16_nf:
- case ARM::BI_InterlockedDecrement_nf:
- case ARM::BI_InterlockedDecrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
}
// Get the last argument, which specifies the vector type.
assert(HasExtraArg);
- llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
- if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
+ if (!Result)
return nullptr;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
@@ -7127,7 +7596,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = DoubleTy;
// Determine whether this is an unsigned conversion or not.
- bool usgn = Result.getZExtValue() == 1;
+ bool usgn = Result->getZExtValue() == 1;
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
// Call the appropriate intrinsic.
@@ -7136,14 +7605,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
// Determine the type of this overloaded NEON intrinsic.
- NeonTypeFlags Type(Result.getZExtValue());
+ NeonTypeFlags Type = Result->getZExtValue();
bool usgn = Type.isUnsigned();
bool rightShift = false;
- llvm::VectorType *VTy = GetNeonType(this, Type,
- getTarget().hasLegalHalfType(),
- false,
- getTarget().hasBFloat16Type());
+ llvm::FixedVectorType *VTy =
+ GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
+ getTarget().hasBFloat16Type());
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -7280,11 +7748,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
template<typename Integer>
static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
- llvm::APSInt IntVal;
- bool IsConst = E->isIntegerConstantExpr(IntVal, Context);
- assert(IsConst && "Sema should have checked this was a constant");
- (void)IsConst;
- return IntVal.getExtValue();
+ return E->getIntegerConstantExpr(Context)->getExtValue();
}
static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
@@ -7355,11 +7819,10 @@ static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd)
// or odds, as desired).
SmallVector<int, 16> Indices;
unsigned InputElements =
- cast<llvm::VectorType>(V->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(V->getType())->getNumElements();
for (unsigned i = 0; i < InputElements; i += 2)
Indices.push_back(i + Odd);
- return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
- Indices);
+ return Builder.CreateShuffleVector(V, Indices);
}
static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
@@ -7368,7 +7831,7 @@ static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
assert(V0->getType() == V1->getType() && "Can't zip different vector types");
SmallVector<int, 16> Indices;
unsigned InputElements =
- cast<llvm::VectorType>(V0->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
for (unsigned i = 0; i < InputElements; i++) {
Indices.push_back(i);
Indices.push_back(i + InputElements);
@@ -7400,8 +7863,7 @@ static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
unsigned Mask = ReverseWidth / LaneSize - 1;
for (unsigned i = 0; i < Elements; i++)
Indices.push_back(i ^ Mask);
- return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
- Indices);
+ return Builder.CreateShuffleVector(V, Indices);
}
Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
@@ -7557,14 +8019,14 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
assert(E->getNumArgs() >= 3);
// Get the last argument, which specifies the vector type.
- llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
- if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
+ Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
+ if (!Result)
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
- NeonTypeFlags Type(Result.getZExtValue());
- llvm::VectorType *Ty = GetNeonType(&CGF, Type);
+ NeonTypeFlags Type = Result->getZExtValue();
+ llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
if (!Ty)
return nullptr;
@@ -8240,15 +8702,15 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- if (!E->getArg(i)->isIntegerConstantExpr(Result, getContext()))
- llvm_unreachable("Expected argument to be a constant");
+ Optional<llvm::APSInt> Result =
+ E->getArg(i)->getIntegerConstantExpr(getContext());
+ assert(Result && "Expected argument to be a constant");
// Immediates for SVE llvm intrinsics are always 32-bit. We can safely
// truncate because the immediate has been range checked and no valid
// immediate requires more than a handful of bits.
- Result = Result.extOrTrunc(32);
- Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ *Result = Result->extOrTrunc(32);
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
}
}
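extOrTrunc is value-preserving here because every valid SVE immediate fits comfortably in 32 bits; a minimal illustration:

llvm::APSInt Wide(llvm::APInt(64, 31), /*isUnsigned=*/true);
llvm::APSInt Imm32 = Wide.extOrTrunc(32); // still 31, now i32-sized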
@@ -8465,7 +8927,8 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
case SVE::BI__builtin_sve_svlen_u64: {
SVETypeFlags TF(Builtin->TypeModifier);
auto VTy = cast<llvm::VectorType>(getSVEType(TF));
- auto NumEls = llvm::ConstantInt::get(Ty, VTy->getElementCount().Min);
+ auto *NumEls =
+ llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
return Builder.CreateMul(NumEls, Builder.CreateCall(F));
@@ -8485,8 +8948,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
case SVE::BI__builtin_sve_svtbl2_f64: {
SVETypeFlags TF(Builtin->TypeModifier);
auto VTy = cast<llvm::VectorType>(getSVEType(TF));
- auto TupleTy = llvm::VectorType::get(VTy->getElementType(),
- VTy->getElementCount() * 2);
+ auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
Function *FExtr =
CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
@@ -8597,6 +9059,46 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
}
+ if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
+ BuiltinID == AArch64::BI__builtin_arm_st64b ||
+ BuiltinID == AArch64::BI__builtin_arm_st64bv ||
+ BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
+ llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
+
+ if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
+ // Load from the address via an LLVM intrinsic, receiving a
+ // tuple of 8 i64 words, and store each one to ValPtr.
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
+ llvm::Value *Val = Builder.CreateCall(F, MemAddr);
+ llvm::Value *ToRet;
+ for (size_t i = 0; i < 8; i++) {
+ llvm::Value *ValOffsetPtr = Builder.CreateGEP(ValPtr, Builder.getInt32(i));
+ Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
+ ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
+ }
+ return ToRet;
+ } else {
+ // Load 8 i64 words from ValPtr, and store them to the address
+ // via an LLVM intrinsic.
+ SmallVector<llvm::Value *, 9> Args;
+ Args.push_back(MemAddr);
+ for (size_t i = 0; i < 8; i++) {
+ llvm::Value *ValOffsetPtr = Builder.CreateGEP(ValPtr, Builder.getInt32(i));
+ Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
+ Args.push_back(Builder.CreateLoad(Addr));
+ }
+
+ auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
+ ? Intrinsic::aarch64_st64b
+ : BuiltinID == AArch64::BI__builtin_arm_st64bv
+ ? Intrinsic::aarch64_st64bv
+ : Intrinsic::aarch64_st64bv0);
+ Function *F = CGM.getIntrinsic(Intr);
+ return Builder.CreateCall(F, Args);
+ }
+ }
+
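Usage sketch for the new Armv8.7-A single-copy 64-byte builtins; requires the ls64 extension, and dev stands in for a suitably aligned device address:

unsigned long long buf[8];
void copy_in(void *dev)  { __builtin_arm_ld64b(dev, buf); } // 64-byte load
void copy_out(void *dev) { __builtin_arm_st64b(dev, buf); } // 64-byte store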
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
@@ -8918,6 +9420,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F);
}
+ // Handle MSVC intrinsics before argument evaluation to prevent double
+ // evaluation.
+ if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
+ return EmitMSVCBuiltinExpr(*MsvcIntId, E);
+
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
@@ -8952,11 +9459,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
- assert(IsConst && "Constant arg isn't actually constant?");
- (void)IsConst;
- Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ Ops.push_back(llvm::ConstantInt::get(
+ getLLVMContext(),
+ *E->getArg(i)->getIntegerConstantExpr(getContext())));
}
}
@@ -8971,12 +9476,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Result;
}
- llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
NeonTypeFlags Type(0);
- if (Arg->isIntegerConstantExpr(Result, getContext()))
+ if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
// Determine the type of this overloaded NEON intrinsic.
- Type = NeonTypeFlags(Result.getZExtValue());
+ Type = NeonTypeFlags(Result->getZExtValue());
bool usgn = Type.isUnsigned();
bool quad = Type.isQuad();
@@ -8999,21 +9503,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
- case NEON::BI__builtin_neon_vcvts_u32_f32:
- case NEON::BI__builtin_neon_vcvtd_u64_f64:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvts_s32_f32:
- case NEON::BI__builtin_neon_vcvtd_s64_f64: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
- llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
- llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
- Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], InTy);
- return Builder.CreateFPToSI(Ops[0], InTy);
- }
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
usgn = true;
@@ -9051,44 +9540,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
- case NEON::BI__builtin_neon_vcvth_u16_f16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvth_s16_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Int16Ty);
- return Builder.CreateFPToSI(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vcvth_u32_f16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvth_s32_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Int32Ty);
- return Builder.CreateFPToSI(Ops[0], Int32Ty);
- }
- case NEON::BI__builtin_neon_vcvth_u64_f16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvth_s64_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Int64Ty);
- return Builder.CreateFPToSI(Ops[0], Int64Ty);
- }
case NEON::BI__builtin_neon_vcvtah_u16_f16:
case NEON::BI__builtin_neon_vcvtmh_u16_f16:
case NEON::BI__builtin_neon_vcvtnh_u16_f16:
case NEON::BI__builtin_neon_vcvtph_u16_f16:
+ case NEON::BI__builtin_neon_vcvth_u16_f16:
case NEON::BI__builtin_neon_vcvtah_s16_f16:
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
case NEON::BI__builtin_neon_vcvtnh_s16_f16:
- case NEON::BI__builtin_neon_vcvtph_s16_f16: {
+ case NEON::BI__builtin_neon_vcvtph_s16_f16:
+ case NEON::BI__builtin_neon_vcvth_s16_f16: {
unsigned Int;
llvm::Type* InTy = Int32Ty;
llvm::Type* FTy = HalfTy;
@@ -9104,6 +9565,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fcvtnu; break;
case NEON::BI__builtin_neon_vcvtph_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtpu; break;
+ case NEON::BI__builtin_neon_vcvth_u16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtzu; break;
case NEON::BI__builtin_neon_vcvtah_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtas; break;
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
@@ -9112,6 +9575,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fcvtns; break;
case NEON::BI__builtin_neon_vcvtph_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtps; break;
+ case NEON::BI__builtin_neon_vcvth_s16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtzs; break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
return Builder.CreateTrunc(Ops[0], Int16Ty);
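Folding vcvth_s16_f16 and vcvth_u16_f16 into this switch moves them from plain fptosi/fptoui, whose result is poison for NaN or out-of-range input, onto the fcvtzs/fcvtzu intrinsics, which saturate per the Arm rules. A sketch, assuming an AArch64 target with fp16:

#include <arm_fp16.h>
// 70000 overflows float16_t to +inf; fcvtzs saturates it to INT16_MAX
// where a raw fptosi would be poison.
int16_t clamped(void) { return vcvth_s16_f16((float16_t)70000.0f); }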
@@ -9661,142 +10126,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
}
- case AArch64::BI_BitScanForward:
- case AArch64::BI_BitScanForward64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
- case AArch64::BI_BitScanReverse:
- case AArch64::BI_BitScanReverse64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
- case AArch64::BI_InterlockedAnd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
- case AArch64::BI_InterlockedExchange64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
- case AArch64::BI_InterlockedExchangeAdd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
- case AArch64::BI_InterlockedExchangeSub64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
- case AArch64::BI_InterlockedOr64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
- case AArch64::BI_InterlockedXor64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
- case AArch64::BI_InterlockedDecrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
- case AArch64::BI_InterlockedIncrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
- case AArch64::BI_InterlockedExchangeAdd8_acq:
- case AArch64::BI_InterlockedExchangeAdd16_acq:
- case AArch64::BI_InterlockedExchangeAdd_acq:
- case AArch64::BI_InterlockedExchangeAdd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
- case AArch64::BI_InterlockedExchangeAdd8_rel:
- case AArch64::BI_InterlockedExchangeAdd16_rel:
- case AArch64::BI_InterlockedExchangeAdd_rel:
- case AArch64::BI_InterlockedExchangeAdd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
- case AArch64::BI_InterlockedExchangeAdd8_nf:
- case AArch64::BI_InterlockedExchangeAdd16_nf:
- case AArch64::BI_InterlockedExchangeAdd_nf:
- case AArch64::BI_InterlockedExchangeAdd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
- case AArch64::BI_InterlockedExchange8_acq:
- case AArch64::BI_InterlockedExchange16_acq:
- case AArch64::BI_InterlockedExchange_acq:
- case AArch64::BI_InterlockedExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
- case AArch64::BI_InterlockedExchange8_rel:
- case AArch64::BI_InterlockedExchange16_rel:
- case AArch64::BI_InterlockedExchange_rel:
- case AArch64::BI_InterlockedExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
- case AArch64::BI_InterlockedExchange8_nf:
- case AArch64::BI_InterlockedExchange16_nf:
- case AArch64::BI_InterlockedExchange_nf:
- case AArch64::BI_InterlockedExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
- case AArch64::BI_InterlockedCompareExchange8_acq:
- case AArch64::BI_InterlockedCompareExchange16_acq:
- case AArch64::BI_InterlockedCompareExchange_acq:
- case AArch64::BI_InterlockedCompareExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
- case AArch64::BI_InterlockedCompareExchange8_rel:
- case AArch64::BI_InterlockedCompareExchange16_rel:
- case AArch64::BI_InterlockedCompareExchange_rel:
- case AArch64::BI_InterlockedCompareExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
- case AArch64::BI_InterlockedCompareExchange8_nf:
- case AArch64::BI_InterlockedCompareExchange16_nf:
- case AArch64::BI_InterlockedCompareExchange_nf:
- case AArch64::BI_InterlockedCompareExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
- case AArch64::BI_InterlockedOr8_acq:
- case AArch64::BI_InterlockedOr16_acq:
- case AArch64::BI_InterlockedOr_acq:
- case AArch64::BI_InterlockedOr64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
- case AArch64::BI_InterlockedOr8_rel:
- case AArch64::BI_InterlockedOr16_rel:
- case AArch64::BI_InterlockedOr_rel:
- case AArch64::BI_InterlockedOr64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
- case AArch64::BI_InterlockedOr8_nf:
- case AArch64::BI_InterlockedOr16_nf:
- case AArch64::BI_InterlockedOr_nf:
- case AArch64::BI_InterlockedOr64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
- case AArch64::BI_InterlockedXor8_acq:
- case AArch64::BI_InterlockedXor16_acq:
- case AArch64::BI_InterlockedXor_acq:
- case AArch64::BI_InterlockedXor64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
- case AArch64::BI_InterlockedXor8_rel:
- case AArch64::BI_InterlockedXor16_rel:
- case AArch64::BI_InterlockedXor_rel:
- case AArch64::BI_InterlockedXor64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
- case AArch64::BI_InterlockedXor8_nf:
- case AArch64::BI_InterlockedXor16_nf:
- case AArch64::BI_InterlockedXor_nf:
- case AArch64::BI_InterlockedXor64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
- case AArch64::BI_InterlockedAnd8_acq:
- case AArch64::BI_InterlockedAnd16_acq:
- case AArch64::BI_InterlockedAnd_acq:
- case AArch64::BI_InterlockedAnd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
- case AArch64::BI_InterlockedAnd8_rel:
- case AArch64::BI_InterlockedAnd16_rel:
- case AArch64::BI_InterlockedAnd_rel:
- case AArch64::BI_InterlockedAnd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
- case AArch64::BI_InterlockedAnd8_nf:
- case AArch64::BI_InterlockedAnd16_nf:
- case AArch64::BI_InterlockedAnd_nf:
- case AArch64::BI_InterlockedAnd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
- case AArch64::BI_InterlockedIncrement16_acq:
- case AArch64::BI_InterlockedIncrement_acq:
- case AArch64::BI_InterlockedIncrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
- case AArch64::BI_InterlockedIncrement16_rel:
- case AArch64::BI_InterlockedIncrement_rel:
- case AArch64::BI_InterlockedIncrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
- case AArch64::BI_InterlockedIncrement16_nf:
- case AArch64::BI_InterlockedIncrement_nf:
- case AArch64::BI_InterlockedIncrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
- case AArch64::BI_InterlockedDecrement16_acq:
- case AArch64::BI_InterlockedDecrement_acq:
- case AArch64::BI_InterlockedDecrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
- case AArch64::BI_InterlockedDecrement16_rel:
- case AArch64::BI_InterlockedDecrement_rel:
- case AArch64::BI_InterlockedDecrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
- case AArch64::BI_InterlockedDecrement16_nf:
- case AArch64::BI_InterlockedDecrement_nf:
- case AArch64::BI_InterlockedDecrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
case AArch64::BI_InterlockedAdd: {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
@@ -9808,7 +10137,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
}
- llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::FixedVectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
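
Much of the mechanical churn in this patch is the VectorType to FixedVectorType migration: after LLVM split out scalable vectors, getNumElements() exists only on FixedVectorType, so every cast feeding a getNumElements() call changes. A minimal sketch of the new pattern, assuming it is compiled against the LLVM headers:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Casting.h"

    // getNumElements() moved to FixedVectorType; a plain VectorType may be
    // scalable, where the lane count is not a compile-time constant.
    unsigned numEltsOrZero(llvm::Type *Ty) {
      if (auto *FVT = llvm::dyn_cast<llvm::FixedVectorType>(Ty))
        return FVT->getNumElements();
      return 0; // scalable vector or non-vector type
    }
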
@@ -9869,13 +10198,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
}
case NEON::BI__builtin_neon_vfma_laneq_v: {
- llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ auto *VTy = cast<llvm::FixedVectorType>(Ty);
// v1f64 fma should be mapped to Neon scalar f64 fma
if (VTy && VTy->getElementType() == DoubleTy) {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
- llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+ llvm::FixedVectorType *VTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
Value *Result;
@@ -10152,10 +10481,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtq_u64_v:
case NEON::BI__builtin_neon_vcvtq_s16_v:
case NEON::BI__builtin_neon_vcvtq_u16_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Ty);
- return Builder.CreateFPToSI(Ops[0], Ty);
+ Int =
+ usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
+ llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
}
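
The vcvt lowering switches from plain fptoui/fptosi to the target fcvtzu/fcvtzs intrinsics: the AArch64 instructions saturate out-of-range inputs and map NaN to zero, whereas an out-of-range fptosi yields poison. A scalar reference model of the signed 32-bit case (a sketch, following the architectural FCVTZS definition):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Saturating float-to-signed conversion, truncating toward zero.
    static int32_t fcvtzs_ref(float F) {
      if (std::isnan(F)) return 0;
      if (F >= 2147483648.0f) return INT32_MAX;
      if (F < -2147483648.0f) return INT32_MIN;
      return (int32_t)F;
    }

    int main() {
      assert(fcvtzs_ref(1e10f) == INT32_MAX); // fptosi would be poison here
      assert(fcvtzs_ref(-1.9f) == -1);
    }
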
case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_u16_v:
@@ -10243,8 +10572,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
Quad = true;
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
- llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
+ llvm::FixedVectorType *VTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
@@ -10760,8 +11089,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld2q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
- Ops.push_back(Ops[1]);
- Ops.erase(Ops.begin()+1);
+ std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
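
The repeated push_back-then-erase pairs are replaced with a single std::rotate; both forms move the element at index 1 to the back while sliding the rest left, but the rotate avoids the extra copy. A self-contained check of the equivalence:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> A = {10, 20, 30, 40};
      A.push_back(A[1]);       // old idiom
      A.erase(A.begin() + 1);  // A == {10, 30, 40, 20}

      std::vector<int> B = {10, 20, 30, 40};
      std::rotate(B.begin() + 1, B.begin() + 2, B.end()); // new idiom

      assert(A == B);
    }
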
@@ -10774,8 +11102,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld3q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
- Ops.push_back(Ops[1]);
- Ops.erase(Ops.begin()+1);
+ std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
@@ -10789,8 +11116,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld4q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
- Ops.push_back(Ops[1]);
- Ops.erase(Ops.begin()+1);
+ std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
@@ -10803,16 +11129,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
@@ -10820,16 +11144,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
@@ -10837,16 +11159,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
@@ -10956,9 +11276,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
- BuiltinID == BPF::BI__builtin_btf_type_id) &&
+ BuiltinID == BPF::BI__builtin_btf_type_id ||
+ BuiltinID == BPF::BI__builtin_preserve_type_info ||
+ BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
"unexpected BPF builtin");
+  // A sequence number, injected into IR builtin functions, to
+  // prevent CSE given that the only difference between the calls
+  // may be the debuginfo metadata.
+ static uint32_t BuiltinSeqNum;
+
switch (BuiltinID) {
default:
llvm_unreachable("Unexpected BPF builtin");
@@ -10989,65 +11316,65 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
{FieldAddr->getType()});
return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
}
- case BPF::BI__builtin_btf_type_id: {
- Value *FieldVal = nullptr;
-
- // The LValue cannot be converted Value in order to be used as the function
- // parameter. If it is a structure, it is the "alloca" result of the LValue
- // (a pointer) is used in the parameter. If it is a simple type,
- // the value will be loaded from its corresponding "alloca" and used as
- // the parameter. In our case, let us just get a pointer of the LValue
- // since we do not really use the parameter. The purpose of parameter
- // is to prevent the generated IR llvm.bpf.btf.type.id intrinsic call,
- // which carries metadata, from being changed.
- bool IsLValue = E->getArg(0)->isLValue();
- if (IsLValue)
- FieldVal = EmitLValue(E->getArg(0)).getPointer(*this);
- else
- FieldVal = EmitScalarExpr(E->getArg(0));
+ case BPF::BI__builtin_btf_type_id:
+ case BPF::BI__builtin_preserve_type_info: {
+ if (!getDebugInfo()) {
+ CGM.Error(E->getExprLoc(), "using builtin function without -g");
+ return nullptr;
+ }
+
+ const Expr *Arg0 = E->getArg(0);
+ llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
+ Arg0->getType(), Arg0->getExprLoc());
+
+ ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
+ Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
+ llvm::Function *FnDecl;
+ if (BuiltinID == BPF::BI__builtin_btf_type_id)
+ FnDecl = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
+ else
+ FnDecl = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
+ CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+ return Fn;
+ }
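
A usage sketch of why the sequence number matters (target flags and the builtin's exact prototype are assumptions, not taken from this patch): two calls whose generated intrinsics differ only in attached debuginfo would otherwise be merged.

    // Built with something like: clang -target bpf -O2 -g
    struct S { int a; } v;

    unsigned long id_of_v(void)     { return __builtin_btf_type_id(v, 0); }
    unsigned long id_of_ptr_v(void) { return __builtin_btf_type_id(&v, 0); }
    // Both lower to llvm.bpf.btf.type.id(seq, flag); only the attached
    // !preserve.access.index metadata differs, so distinct seq values
    // keep CSE from folding the two calls into one.
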
+ case BPF::BI__builtin_preserve_enum_value: {
if (!getDebugInfo()) {
- CGM.Error(E->getExprLoc(), "using __builtin_btf_type_id() without -g");
+ CGM.Error(E->getExprLoc(), "using builtin function without -g");
return nullptr;
}
- // Generate debuginfo type for the first argument.
- llvm::DIType *DbgInfo =
- getDebugInfo()->getOrCreateStandaloneType(E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc());
+ const Expr *Arg0 = E->getArg(0);
+ llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
+ Arg0->getType(), Arg0->getExprLoc());
+
+    // Find the enumerator being referenced.
+ const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
+ const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
+ const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
+ const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
+
+ auto &InitVal = Enumerator->getInitVal();
+ std::string InitValStr;
+ if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
+ InitValStr = std::to_string(InitVal.getSExtValue());
+ else
+ InitValStr = std::to_string(InitVal.getZExtValue());
+ std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
+ Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
+ Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
- // Built the IR for the btf_type_id intrinsic.
- //
- // In the above, we converted LValue argument to a pointer to LValue.
- // For example, the following
- // int v;
- // C1: __builtin_btf_type_id(v, flag);
- // will be converted to
- // L1: llvm.bpf.btf.type.id(&v, flag)
- // This makes it hard to differentiate from
- // C2: __builtin_btf_type_id(&v, flag);
- // to
- // L2: llvm.bpf.btf.type.id(&v, flag)
- //
- // If both C1 and C2 are present in the code, the llvm may later
- // on do CSE on L1 and L2, which will result in incorrect tagged types.
- //
- // The C1->L1 transformation only happens if the argument of
- // __builtin_btf_type_id() is a LValue. So Let us put whether
- // the argument is an LValue or not into generated IR. This should
- // prevent potential CSE from causing debuginfo type loss.
- //
- // The generated IR intrinsics will hence look like
- // L1: llvm.bpf.btf.type.id(&v, 1, flag) !di_type_for_{v};
- // L2: llvm.bpf.btf.type.id(&v, 0, flag) !di_type_for_{&v};
- Constant *CV = ConstantInt::get(IntTy, IsLValue);
- llvm::Function *FnBtfTypeId = llvm::Intrinsic::getDeclaration(
- &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id,
- {FieldVal->getType(), CV->getType()});
- CallInst *Fn = Builder.CreateCall(FnBtfTypeId, {FieldVal, CV, FlagValue});
+ llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
+ CallInst *Fn =
+ Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
return Fn;
}
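
The cast chain unwrapped above (a unary dereference of a C-style cast of an enumerator reference) is the idiom the builtin expects. A usage sketch, with the return type assumed:

    enum E { ANSWER = 42 };

    unsigned long get(void) {
      // Lowered to llvm.bpf.preserve.enum.value(seq, "ANSWER:42", 1);
      // the enumerator is encoded as a "name:value" global string.
      return __builtin_preserve_enum_value(*(enum E *)ANSWER, 1);
    }
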
@@ -11109,7 +11436,8 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
llvm::PointerType::getUnqual(Ops[1]->getType()));
Value *MaskVec = getMaskVecValue(
- CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+ CGF, Ops[2],
+ cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
}
@@ -11121,7 +11449,8 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
llvm::PointerType::getUnqual(Ops[1]->getType()));
Value *MaskVec = getMaskVecValue(
- CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+ CGF, Ops[2],
+ cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
}
@@ -11135,7 +11464,8 @@ static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
+ Value *MaskVec = getMaskVecValue(
+ CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
ResultTy);
@@ -11145,7 +11475,7 @@ static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
bool IsCompress) {
- auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+ auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
@@ -11157,7 +11487,7 @@ static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
- auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+ auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
llvm::Type *PtrTy = ResultTy->getElementType();
// Cast the pointer to element type.
@@ -11193,7 +11523,7 @@ static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
  // Funnel shift amounts are treated as modulo, and types are all power-of-2, so
  // we only care about the lowest log2 bits anyway.
if (Amt->getType() != Ty) {
- unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
+ unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
}
@@ -11252,7 +11582,7 @@ static Value *EmitX86Select(CodeGenFunction &CGF,
return Op0;
Mask = getMaskVecValue(
- CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
+ CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
@@ -11299,7 +11629,7 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
assert((Ops.size() == 2 || Ops.size() == 4) &&
"Unexpected number of arguments");
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *Cmp;
if (CC == 3) {
@@ -11353,25 +11683,6 @@ static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
}
-static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
-
- llvm::Type *Ty = Ops[0]->getType();
- Value *Zero = llvm::Constant::getNullValue(Ty);
- Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
- Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
- Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
- return Res;
-}
-
-static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
- ArrayRef<Value *> Ops) {
- Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
- Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
-
- assert(Ops.size() == 2);
- return Res;
-}
-
// Lowers X86 FMA intrinsics to IR.
static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
unsigned BuiltinID, bool IsAddSub) {
@@ -11576,18 +11887,15 @@ static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
llvm::Type *DstTy) {
- unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned NumberOfElements =
+ cast<llvm::FixedVectorType>(DstTy)->getNumElements();
Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
-// Emit addition or subtraction with signed/unsigned saturation.
-static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, bool IsSigned,
- bool IsAddition) {
- Intrinsic::ID IID =
- IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
- : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
+// Emit binary intrinsic with the same type used in result/args.
+static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops, Intrinsic::ID IID) {
llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
}
@@ -11612,14 +11920,14 @@ static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
}
- unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
Value *Src = Ops[0];
// Extract the subvector.
- if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
+ if (NumDstElts !=
+ cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
assert(NumDstElts == 4 && "Unexpected vector size");
- Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
- ArrayRef<int>{0, 1, 2, 3});
+ Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
}
// Bitcast from vXi16 to vXf16.
@@ -11790,7 +12098,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
if (BuiltinID == X86::BI__builtin_cpu_init)
return EmitX86CpuInit();
+ // Handle MSVC intrinsics before argument evaluation to prevent double
+ // evaluation.
+ if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
+ return EmitMSVCBuiltinExpr(*MsvcIntId, E);
+
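
EmitMSVCBuiltinExpr evaluates the call's arguments itself, so dispatching to it after the generic argument loop below would run argument side effects twice. A generic, self-contained model of the hazard being avoided:

    #include <cassert>

    static int SideEffects = 0;
    static int Arg() { ++SideEffects; return 7; }

    // Pre-evaluating and then delegating to a callee that evaluates again
    // doubles the side effects; dispatching early evaluates exactly once.
    static int EmitThenDelegate() { Arg(); return Arg(); }
    static int DelegateOnly() { return Arg(); }

    int main() {
      SideEffects = 0; EmitThenDelegate(); assert(SideEffects == 2);
      SideEffects = 0; DelegateOnly();     assert(SideEffects == 1);
    }
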
SmallVector<Value*, 4> Ops;
+ bool IsMaskFCmp = false;
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
@@ -11807,10 +12121,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
- assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
- Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ Ops.push_back(llvm::ConstantInt::get(
+ getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
}
// These exist so that the builtin that takes an immediate can be bounds
@@ -11916,7 +12228,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_ext_v8si:
case X86::BI__builtin_ia32_vec_ext_v4di: {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
@@ -11932,7 +12244,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_set_v8si:
case X86::BI__builtin_ia32_vec_set_v4di: {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
@@ -12358,9 +12670,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
- unsigned MinElts =
- std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
- cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
+ unsigned MinElts = std::min(
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
+ cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
@@ -12467,9 +12779,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
- unsigned MinElts =
- std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
- cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
+ unsigned MinElts = std::min(
+ cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
+ cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
@@ -12491,10 +12803,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_extracti64x2_256_mask:
case X86::BI__builtin_ia32_extractf64x2_512_mask:
case X86::BI__builtin_ia32_extracti64x2_512_mask: {
- auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
+ auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
unsigned NumElts = DstTy->getNumElements();
unsigned SrcNumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned SubVectors = SrcNumElts / NumElts;
unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12506,7 +12818,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[i] = i + Index;
Value *Res = Builder.CreateShuffleVector(Ops[0],
- UndefValue::get(Ops[0]->getType()),
makeArrayRef(Indices, NumElts),
"extract");
@@ -12532,9 +12843,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_insertf64x2_512:
case X86::BI__builtin_ia32_inserti64x2_512: {
unsigned DstNumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned SrcNumElts =
- cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
unsigned SubVectors = DstNumElts / SrcNumElts;
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12546,7 +12857,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
Value *Op1 = Builder.CreateShuffleVector(Ops[1],
- UndefValue::get(Ops[1]->getType()),
makeArrayRef(Indices, DstNumElts),
"widen");
@@ -12599,7 +12909,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pblendd128:
case X86::BI__builtin_ia32_pblendd256: {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
int Indices[16];
@@ -12616,7 +12926,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
// Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12632,15 +12942,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[l + i] = l + i;
}
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"pshuflw");
}
case X86::BI__builtin_ia32_pshufhw:
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
// Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12656,8 +12965,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"pshufhw");
}
case X86::BI__builtin_ia32_pshufd:
@@ -12670,7 +12978,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vpermilpd512:
case X86::BI__builtin_ia32_vpermilps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
@@ -12686,8 +12994,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"permil");
}
case X86::BI__builtin_ia32_shufpd:
@@ -12697,7 +13004,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_shufps256:
case X86::BI__builtin_ia32_shufps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
@@ -12725,7 +13032,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_permdi512:
case X86::BI__builtin_ia32_permdf512: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
// These intrinsics operate on 256-bit lanes of four 64-bit elements.
@@ -12734,8 +13041,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"perm");
}
case X86::BI__builtin_ia32_palignr128:
@@ -12744,7 +13050,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
assert(NumElts % 16 == 0);
// If palignr is shifting the pair of vectors more than the size of two
@@ -12782,7 +13088,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_alignq256:
case X86::BI__builtin_ia32_alignq512: {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
// Mask the shift amount to width of two vectors.
@@ -12805,7 +13111,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_shuf_i32x4:
case X86::BI__builtin_ia32_shuf_i64x2: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
unsigned NumLaneElts = NumElts / NumLanes;
@@ -12832,7 +13138,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_permti256: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
// This takes a very simple approach since there are two lanes and a
// shuffle can have 2 inputs. So we reserve the first input for the first
@@ -12870,7 +13176,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
unsigned NumElts = ResultType->getNumElements() * 8;
@@ -12900,7 +13206,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrldqi256_byteshift:
case X86::BI__builtin_ia32_psrldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+ auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
unsigned NumElts = ResultType->getNumElements() * 8;
@@ -13342,9 +13648,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pabsb512:
case X86::BI__builtin_ia32_pabsw512:
case X86::BI__builtin_ia32_pabsd512:
- case X86::BI__builtin_ia32_pabsq512:
- return EmitX86Abs(*this, Ops);
-
+ case X86::BI__builtin_ia32_pabsq512: {
+ Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ }
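
The hand-rolled sub/cmp/select sequence (the deleted EmitX86Abs) becomes a call to llvm.abs with its is_int_min_poison flag cleared, the Builder.getInt1(false) operand, which keeps the old wrapping behavior on INT_MIN. A scalar model:

    #include <cassert>
    #include <climits>

    // llvm.abs(x, /*is_int_min_poison=*/false): INT_MIN wraps to itself,
    // exactly what the old 0 - x plus select sequence produced.
    static int abs_nopoison(int X) {
      return X == INT_MIN ? INT_MIN : (X < 0 ? -X : X);
    }

    int main() {
      assert(abs_nopoison(-5) == 5);
      assert(abs_nopoison(INT_MIN) == INT_MIN);
    }
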
case X86::BI__builtin_ia32_pmaxsb128:
case X86::BI__builtin_ia32_pmaxsw128:
case X86::BI__builtin_ia32_pmaxsd128:
@@ -13357,7 +13664,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pmaxsw512:
case X86::BI__builtin_ia32_pmaxsd512:
case X86::BI__builtin_ia32_pmaxsq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
case X86::BI__builtin_ia32_pmaxub128:
case X86::BI__builtin_ia32_pmaxuw128:
case X86::BI__builtin_ia32_pmaxud128:
@@ -13370,7 +13677,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pmaxuw512:
case X86::BI__builtin_ia32_pmaxud512:
case X86::BI__builtin_ia32_pmaxuq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
case X86::BI__builtin_ia32_pminsb128:
case X86::BI__builtin_ia32_pminsw128:
case X86::BI__builtin_ia32_pminsd128:
@@ -13383,7 +13690,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pminsw512:
case X86::BI__builtin_ia32_pminsd512:
case X86::BI__builtin_ia32_pminsq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
case X86::BI__builtin_ia32_pminub128:
case X86::BI__builtin_ia32_pminuw128:
case X86::BI__builtin_ia32_pminud128:
@@ -13396,7 +13703,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pminuw512:
case X86::BI__builtin_ia32_pminud512:
case X86::BI__builtin_ia32_pminuq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
case X86::BI__builtin_ia32_pmuludq128:
case X86::BI__builtin_ia32_pmuludq256:
@@ -13470,6 +13777,68 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Ops 0 and 1 are swapped.
return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
+ // Reductions
+ case X86::BI__builtin_ia32_reduce_add_d512:
+ case X86::BI__builtin_ia32_reduce_add_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_and_d512:
+ case X86::BI__builtin_ia32_reduce_and_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_fadd_pd512:
+ case X86::BI__builtin_ia32_reduce_fadd_ps512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
+ return Builder.CreateCall(F, {Ops[0], Ops[1]});
+ }
+ case X86::BI__builtin_ia32_reduce_fmul_pd512:
+ case X86::BI__builtin_ia32_reduce_fmul_ps512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
+ return Builder.CreateCall(F, {Ops[0], Ops[1]});
+ }
+ case X86::BI__builtin_ia32_reduce_mul_d512:
+ case X86::BI__builtin_ia32_reduce_mul_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_or_d512:
+ case X86::BI__builtin_ia32_reduce_or_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_smax_d512:
+ case X86::BI__builtin_ia32_reduce_smax_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_smin_d512:
+ case X86::BI__builtin_ia32_reduce_smin_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_umax_d512:
+ case X86::BI__builtin_ia32_reduce_umax_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_umin_d512:
+ case X86::BI__builtin_ia32_reduce_umin_q512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+
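
Note the asymmetry in the new reduction cases: integer reductions take only the vector, while fadd/fmul also take a scalar start value (Ops[0]) ahead of the vector (Ops[1]), matching the llvm.vector.reduce.fadd/fmul signatures. A scalar model of the fadd form:

    #include <array>
    #include <cassert>

    // vector.reduce.fadd(Start, V): the start value is folded in before
    // the lanes, with sequential semantics unless reassociation is allowed.
    int main() {
      double Start = 1.0;
      std::array<double, 8> V{1, 2, 3, 4, 5, 6, 7, 8};
      double Acc = Start;
      for (double D : V)
        Acc += D;
      assert(Acc == 37.0);
    }
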
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
@@ -13547,7 +13916,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclasspd512_mask: {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
@@ -13585,7 +13954,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vp2intersect_d_256:
case X86::BI__builtin_ia32_vp2intersect_d_128: {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Intrinsic::ID ID;
switch (BuiltinID) {
@@ -13644,7 +14013,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
@@ -13691,21 +14060,22 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordpd:
return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
- case X86::BI__builtin_ia32_cmpps:
- case X86::BI__builtin_ia32_cmpps256:
- case X86::BI__builtin_ia32_cmppd:
- case X86::BI__builtin_ia32_cmppd256:
case X86::BI__builtin_ia32_cmpps128_mask:
case X86::BI__builtin_ia32_cmpps256_mask:
case X86::BI__builtin_ia32_cmpps512_mask:
case X86::BI__builtin_ia32_cmppd128_mask:
case X86::BI__builtin_ia32_cmppd256_mask:
- case X86::BI__builtin_ia32_cmppd512_mask: {
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ IsMaskFCmp = true;
+ LLVM_FALLTHROUGH;
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpps256:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmppd256: {
    // Lowering vector comparisons to fcmp instructions, while
    // ignoring the requested signaling behaviour and
    // ignoring the requested rounding mode.
- // This is is only possible as long as FENV_ACCESS is not implemented.
- // See also: https://reviews.llvm.org/D45616
+ // This is only possible if fp-model is not strict and FENV_ACCESS is off.
    // The third argument is the comparison condition, an integer in the
    // range [0, 31].
@@ -13745,8 +14115,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If the predicate is true or false and we're using constrained intrinsics,
// we don't have a compare intrinsic we can use. Just use the legacy X86
// specific intrinsic.
- if ((Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE) &&
- Builder.getIsFPConstrained()) {
+ // If the intrinsic is mask enabled and we're using constrained intrinsics,
+ // use the legacy X86 specific intrinsic.
+ if (Builder.getIsFPConstrained() &&
+ (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
+ IsMaskFCmp)) {
Intrinsic::ID IID;
switch (BuiltinID) {
@@ -13764,36 +14137,32 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
IID = Intrinsic::x86_avx_cmp_pd_256;
break;
case X86::BI__builtin_ia32_cmpps512_mask:
- IID = Intrinsic::x86_avx512_cmp_ps_512;
+ IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
break;
case X86::BI__builtin_ia32_cmppd512_mask:
- IID = Intrinsic::x86_avx512_cmp_pd_512;
+ IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
break;
case X86::BI__builtin_ia32_cmpps128_mask:
- IID = Intrinsic::x86_avx512_cmp_ps_128;
+ IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
break;
case X86::BI__builtin_ia32_cmpps256_mask:
- IID = Intrinsic::x86_avx512_cmp_ps_256;
+ IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
break;
case X86::BI__builtin_ia32_cmppd128_mask:
- IID = Intrinsic::x86_avx512_cmp_pd_128;
+ IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
break;
case X86::BI__builtin_ia32_cmppd256_mask:
- IID = Intrinsic::x86_avx512_cmp_pd_256;
+ IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
break;
}
Function *Intr = CGM.getIntrinsic(IID);
- if (cast<llvm::VectorType>(Intr->getReturnType())
- ->getElementType()
- ->isIntegerTy(1)) {
+ if (IsMaskFCmp) {
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
- Value *MaskIn = Ops[3];
- Ops.erase(&Ops[3]);
-
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
+ Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
Value *Cmp = Builder.CreateCall(Intr, Ops);
- return EmitX86MaskedCompareResult(*this, Cmp, NumElts, MaskIn);
+ return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
}
return Builder.CreateCall(Intr, Ops);
@@ -13801,16 +14170,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Builtins without the _mask suffix return a vector of integers
// of the same width as the input vectors
- switch (BuiltinID) {
- case X86::BI__builtin_ia32_cmpps512_mask:
- case X86::BI__builtin_ia32_cmppd512_mask:
- case X86::BI__builtin_ia32_cmpps128_mask:
- case X86::BI__builtin_ia32_cmpps256_mask:
- case X86::BI__builtin_ia32_cmppd128_mask:
- case X86::BI__builtin_ia32_cmppd256_mask: {
- // FIXME: Support SAE.
+ if (IsMaskFCmp) {
+ // We ignore SAE if strict FP is disabled. We only keep precise
+ // exception behavior under strict FP.
unsigned NumElts =
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *Cmp;
if (IsSignaling)
Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
@@ -13818,9 +14182,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
}
- default:
- return getVectorFCmpIR(Pred, IsSignaling);
- }
+
+ return getVectorFCmpIR(Pred, IsSignaling);
}
// SSE scalar comparison intrinsics
@@ -13869,7 +14232,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
Ops[2] = getMaskVecValue(
*this, Ops[2],
- cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
+ cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
@@ -13935,25 +14298,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__shiftleft128:
case X86::BI__shiftright128: {
- // FIXME: Once fshl/fshr no longer add an unneeded and and cmov, do this:
- // llvm::Function *F = CGM.getIntrinsic(
- // BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
- // Int64Ty);
- // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
- // return Builder.CreateCall(F, Ops);
- llvm::Type *Int128Ty = Builder.getInt128Ty();
- Value *HighPart128 =
- Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
- Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
- Value *Val = Builder.CreateOr(HighPart128, LowPart128);
- Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
- llvm::ConstantInt::get(Int128Ty, 0x3f));
- Value *Res;
- if (BuiltinID == X86::BI__shiftleft128)
- Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
- else
- Res = Builder.CreateLShr(Val, Amt);
- return Builder.CreateTrunc(Res, Int64Ty);
+ llvm::Function *F = CGM.getIntrinsic(
+ BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
+ Int64Ty);
+ // Flip low/high ops and zero-extend amount to matching type.
+ // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
+ // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
+ std::swap(Ops[0], Ops[1]);
+ Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
+ return Builder.CreateCall(F, Ops);
}
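
The operand swap is needed because fshl/fshr take (most-significant, least-significant, amount) while the MSVC builtins take (Low, High, Amount). A 128-bit reference model of __shiftleft128, assuming a compiler with __int128:

    #include <cassert>
    #include <cstdint>

    // High 64 bits of (High:Low) << (Amt mod 64), i.e. fshl(High, Low, Amt).
    static uint64_t shiftleft128_ref(uint64_t Low, uint64_t High, uint8_t Amt) {
      unsigned __int128 Val = ((unsigned __int128)High << 64) | Low;
      return (uint64_t)(Val << (Amt & 63) >> 64);
    }

    int main() {
      assert(shiftleft128_ref(0, 1, 4) == 0x10);
      assert(shiftleft128_ref(0x8000000000000000ULL, 0, 1) == 1);
    }
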
case X86::BI_ReadWriteBarrier:
case X86::BI_ReadBarrier:
@@ -13961,65 +14314,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::SingleThread);
}
- case X86::BI_BitScanForward:
- case X86::BI_BitScanForward64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
- case X86::BI_BitScanReverse:
- case X86::BI_BitScanReverse64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
-
- case X86::BI_InterlockedAnd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
- case X86::BI_InterlockedExchange64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
- case X86::BI_InterlockedExchangeAdd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
- case X86::BI_InterlockedExchangeSub64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
- case X86::BI_InterlockedOr64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
- case X86::BI_InterlockedXor64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
- case X86::BI_InterlockedDecrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
- case X86::BI_InterlockedIncrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
- case X86::BI_InterlockedCompareExchange128: {
- // InterlockedCompareExchange128 doesn't directly refer to 128bit ints,
- // instead it takes pointers to 64bit ints for Destination and
- // ComparandResult, and exchange is taken as two 64bit ints (high & low).
- // The previous value is written to ComparandResult, and success is
- // returned.
-
- llvm::Type *Int128Ty = Builder.getInt128Ty();
- llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
-
- Value *Destination =
- Builder.CreateBitCast(Ops[0], Int128PtrTy);
- Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
- Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
- Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
- getContext().toCharUnitsFromBits(128));
-
- Value *Exchange = Builder.CreateOr(
- Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
- ExchangeLow128);
-
- Value *Comparand = Builder.CreateLoad(ComparandResult);
-
- AtomicCmpXchgInst *CXI =
- Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
- AtomicOrdering::SequentiallyConsistent,
- AtomicOrdering::SequentiallyConsistent);
- CXI->setVolatile(true);
-
- // Write the result back to the inout pointer.
- Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
-
- // Get the success boolean and zero extend it to i8.
- Value *Success = Builder.CreateExtractValue(CXI, 1);
- return Builder.CreateZExt(Success, ConvertType(E->getType()));
- }
case X86::BI_AddressOfReturnAddress: {
Function *F =
@@ -14076,28 +14370,124 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_paddsw256:
case X86::BI__builtin_ia32_paddsb128:
case X86::BI__builtin_ia32_paddsw128:
- return EmitX86AddSubSatExpr(*this, Ops, true, true);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
case X86::BI__builtin_ia32_paddusb512:
case X86::BI__builtin_ia32_paddusw512:
case X86::BI__builtin_ia32_paddusb256:
case X86::BI__builtin_ia32_paddusw256:
case X86::BI__builtin_ia32_paddusb128:
case X86::BI__builtin_ia32_paddusw128:
- return EmitX86AddSubSatExpr(*this, Ops, false, true);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
case X86::BI__builtin_ia32_psubsb512:
case X86::BI__builtin_ia32_psubsw512:
case X86::BI__builtin_ia32_psubsb256:
case X86::BI__builtin_ia32_psubsw256:
case X86::BI__builtin_ia32_psubsb128:
case X86::BI__builtin_ia32_psubsw128:
- return EmitX86AddSubSatExpr(*this, Ops, true, false);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
case X86::BI__builtin_ia32_psubusb512:
case X86::BI__builtin_ia32_psubusw512:
case X86::BI__builtin_ia32_psubusb256:
case X86::BI__builtin_ia32_psubusw256:
case X86::BI__builtin_ia32_psubusb128:
case X86::BI__builtin_ia32_psubusw128:
- return EmitX86AddSubSatExpr(*this, Ops, false, false);
+ return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
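
EmitX86AddSubSatExpr collapses into the generic helper; the saturation semantics are unchanged. A scalar model of llvm.sadd.sat on i8, the element type of paddsb:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Signed saturating add: clamp instead of wrapping on overflow.
    static int8_t sadd_sat_i8(int8_t A, int8_t B) {
      int R = A + B;
      if (R > std::numeric_limits<int8_t>::max())
        return std::numeric_limits<int8_t>::max();
      if (R < std::numeric_limits<int8_t>::min())
        return std::numeric_limits<int8_t>::min();
      return (int8_t)R;
    }

    int main() {
      assert(sadd_sat_i8(100, 100) == 127);
      assert(sadd_sat_i8(-100, -100) == -128);
    }
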
+ case X86::BI__builtin_ia32_encodekey128_u32: {
+ Intrinsic::ID IID = Intrinsic::x86_encodekey128;
+
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
+
+ for (int i = 0; i < 6; ++i) {
+ Value *Extract = Builder.CreateExtractValue(Call, i + 1);
+ Value *Ptr = Builder.CreateConstGEP1_32(Ops[2], i * 16);
+ Ptr = Builder.CreateBitCast(
+ Ptr, llvm::PointerType::getUnqual(Extract->getType()));
+ Builder.CreateAlignedStore(Extract, Ptr, Align(1));
+ }
+
+ return Builder.CreateExtractValue(Call, 0);
+ }
+ case X86::BI__builtin_ia32_encodekey256_u32: {
+ Intrinsic::ID IID = Intrinsic::x86_encodekey256;
+
+ Value *Call =
+ Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
+
+ for (int i = 0; i < 7; ++i) {
+ Value *Extract = Builder.CreateExtractValue(Call, i + 1);
+ Value *Ptr = Builder.CreateConstGEP1_32(Ops[3], i * 16);
+ Ptr = Builder.CreateBitCast(
+ Ptr, llvm::PointerType::getUnqual(Extract->getType()));
+ Builder.CreateAlignedStore(Extract, Ptr, Align(1));
+ }
+
+ return Builder.CreateExtractValue(Call, 0);
+ }
+ case X86::BI__builtin_ia32_aesenc128kl_u8:
+ case X86::BI__builtin_ia32_aesdec128kl_u8:
+ case X86::BI__builtin_ia32_aesenc256kl_u8:
+ case X86::BI__builtin_ia32_aesdec256kl_u8: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unexpected builtin");
+ case X86::BI__builtin_ia32_aesenc128kl_u8:
+ IID = Intrinsic::x86_aesenc128kl;
+ break;
+ case X86::BI__builtin_ia32_aesdec128kl_u8:
+ IID = Intrinsic::x86_aesdec128kl;
+ break;
+ case X86::BI__builtin_ia32_aesenc256kl_u8:
+ IID = Intrinsic::x86_aesenc256kl;
+ break;
+ case X86::BI__builtin_ia32_aesdec256kl_u8:
+ IID = Intrinsic::x86_aesdec256kl;
+ break;
+ }
+
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
+
+ Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
+ Ops[0]);
+
+ return Builder.CreateExtractValue(Call, 0);
+ }
+ case X86::BI__builtin_ia32_aesencwide128kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide128kl_u8:
+ case X86::BI__builtin_ia32_aesencwide256kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ case X86::BI__builtin_ia32_aesencwide128kl_u8:
+ IID = Intrinsic::x86_aesencwide128kl;
+ break;
+ case X86::BI__builtin_ia32_aesdecwide128kl_u8:
+ IID = Intrinsic::x86_aesdecwide128kl;
+ break;
+ case X86::BI__builtin_ia32_aesencwide256kl_u8:
+ IID = Intrinsic::x86_aesencwide256kl;
+ break;
+ case X86::BI__builtin_ia32_aesdecwide256kl_u8:
+ IID = Intrinsic::x86_aesdecwide256kl;
+ break;
+ }
+
+ Value *InOps[9];
+ InOps[0] = Ops[2];
+ for (int i = 0; i != 8; ++i) {
+ Value *Ptr = Builder.CreateConstGEP1_32(Ops[1], i);
+ InOps[i + 1] = Builder.CreateAlignedLoad(Ptr, Align(16));
+ }
+
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
+
+ for (int i = 0; i != 8; ++i) {
+ Value *Extract = Builder.CreateExtractValue(Call, i + 1);
+ Value *Ptr = Builder.CreateConstGEP1_32(Ops[0], i);
+ Builder.CreateAlignedStore(Extract, Ptr, Align(16));
+ }
+
+ return Builder.CreateExtractValue(Call, 0);
+ }
}
}
@@ -14284,6 +14674,63 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
+ case PPC::BI__builtin_altivec_vec_replace_elt:
+ case PPC::BI__builtin_altivec_vec_replace_unaligned: {
+    // The third argument of vec_replace_elt and vec_replace_unaligned must
+    // be a compile-time constant and determines whether the vinsw or the
+    // vinsd instruction is emitted.
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ assert(ArgCI &&
+ "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Function *F = nullptr;
+ Value *Call = nullptr;
+ int64_t ConstArg = ArgCI->getSExtValue();
+ unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
+ bool Is32Bit = false;
+ assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
+ // The input to vec_replace_elt is an element index, not a byte index.
+ if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
+ ConstArg *= ArgWidth / 8;
+ if (ArgWidth == 32) {
+ Is32Bit = true;
+ // When the second argument is 32 bits, it can either be an integer or
+ // a float. The vinsw intrinsic is used in this case.
+ F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
+      // Fix the constant according to endianness.
+ if (getTarget().isLittleEndian())
+ ConstArg = 12 - ConstArg;
+ } else {
+ // When the second argument is 64 bits, it can either be a long long or
+ // a double. The vinsd intrinsic is used in this case.
+ F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
+ // Fix the constant for little endian.
+ if (getTarget().isLittleEndian())
+ ConstArg = 8 - ConstArg;
+ }
+ Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
+    // Depending on ArgWidth, the second argument is a float or a double.
+    // If the inputs are floating point, bitcast them to 32-bit integers
+    // (for floats) or 64-bit integers (for doubles) first.
+ if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
+ Ops[0] = Builder.CreateBitCast(
+ Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
+ : llvm::FixedVectorType::get(Int64Ty, 2));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
+ }
+ // Emit the call to vinsw or vinsd.
+ Call = Builder.CreateCall(F, Ops);
+    // Depending on the builtin, bitcast to the appropriate result type.
+ if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
+ !Ops[1]->getType()->isIntegerTy())
+ return Builder.CreateBitCast(Call, ResultType);
+ else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
+ Ops[1]->getType()->isIntegerTy())
+ return Call;
+ else
+ return Builder.CreateBitCast(Call,
+ llvm::FixedVectorType::get(Int8Ty, 16));
+ }
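
A worked model of the index fix-up above (an illustration of the arithmetic, not code from the patch): vinsw takes a big-endian byte offset into the 16-byte vector, so element index E of a 32-bit element becomes byte E * 4, mirrored to 12 - E * 4 on little-endian targets.

    #include <cassert>

    static int vinswByteOffset(int EltIndex, bool LittleEndian) {
      int Byte = EltIndex * 4; // ConstArg *= ArgWidth / 8 with ArgWidth == 32
      return LittleEndian ? 12 - Byte : Byte;
    }

    int main() {
      assert(vinswByteOffset(0, /*LittleEndian=*/true) == 12);
      assert(vinswByteOffset(3, /*LittleEndian=*/true) == 0);
      assert(vinswByteOffset(1, /*LittleEndian=*/false) == 4);
    }
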
case PPC::BI__builtin_altivec_vpopcntb:
case PPC::BI__builtin_altivec_vpopcnth:
case PPC::BI__builtin_altivec_vpopcntw:
@@ -14329,8 +14776,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
BuiltinID == PPC::BI__builtin_vsx_xvrspic)
ID = Builder.getIsFPConstrained()
- ? Intrinsic::experimental_constrained_nearbyint
- : Intrinsic::nearbyint;
+ ? Intrinsic::experimental_constrained_rint
+ : Intrinsic::rint;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
BuiltinID == PPC::BI__builtin_vsx_xvrspip)
ID = Builder.getIsFPConstrained()
@@ -14565,6 +15012,77 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractElement(Unpacked, Index);
}
+
+  // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
+  // Some of the MMA instructions accumulate their result into an existing
+  // accumulator whereas the others generate a new accumulator. So we need
+  // custom code generation to expand a builtin call into a load (if the
+  // corresponding instruction accumulates its result), followed by the call
+  // to the intrinsic and a store of the result.
+#define CUSTOM_BUILTIN(Name, Types, Accumulate) \
+ case PPC::BI__builtin_##Name:
+#include "clang/Basic/BuiltinsPPC.def"
+ {
+ // The first argument of these two builtins is a pointer used to store their
+ // result. However, the llvm intrinsics return their result in multiple
+ // return values. So, here we emit code extracting these values from the
+ // intrinsic results and storing them using that pointer.
+ if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
+ BuiltinID == PPC::BI__builtin_vsx_disassemble_pair) {
+ unsigned NumVecs = 2;
+ auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
+ if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
+ NumVecs = 4;
+ Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
+ }
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic);
+ Address Addr = EmitPointerWithAlignment(E->getArg(1));
+ Value *Vec = Builder.CreateLoad(Addr);
+ Value *Call = Builder.CreateCall(F, {Vec});
+ llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
+ Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
+ for (unsigned i = 0; i < NumVecs; i++) {
+ Value *Vec = Builder.CreateExtractValue(Call, i);
+ llvm::ConstantInt *Index = llvm::ConstantInt::get(IntTy, i);
+ Value *GEP = Builder.CreateInBoundsGEP(Ptr, Index);
+ Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
+ }
+ return Call;
+ }
+ bool Accumulate;
+ switch (BuiltinID) {
+ #define CUSTOM_BUILTIN(Name, Types, Acc) \
+ case PPC::BI__builtin_##Name: \
+ ID = Intrinsic::ppc_##Name; \
+ Accumulate = Acc; \
+ break;
+ #include "clang/Basic/BuiltinsPPC.def"
+ }
+ if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
+ BuiltinID == PPC::BI__builtin_vsx_stxvp) {
+ if (BuiltinID == PPC::BI__builtin_vsx_lxvp) {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+ } else {
+ Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+ }
+ Ops.pop_back();
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+ }
+ SmallVector<Value *, 4> CallOps;
+ if (Accumulate) {
+ Address Addr = EmitPointerWithAlignment(E->getArg(0));
+ Value *Acc = Builder.CreateLoad(Addr);
+ CallOps.push_back(Acc);
+ }
+ for (unsigned i = 1; i < Ops.size(); i++)
+ CallOps.push_back(Ops[i]);
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ Value *Call = Builder.CreateCall(F, CallOps);
+ return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
+ }
}
}
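For context, a hedged source-level sketch of the MMA builtins this block expands (assuming a POWER10 target built with -mmma): the accumulating forms load the __vector_quad through the pointer, call the intrinsic, and store the result back, which is exactly the load/call/store expansion performed above.

typedef vector unsigned char vec_t; // requires -maltivec

// Accumulating form: expands to a load of *acc, the intrinsic call, and a
// store back to *acc.
void madd(__vector_quad *acc, vec_t a, vec_t b) {
  __builtin_mma_xvf32gerpp(acc, a, b);
}

// Disassembly form: the intrinsic returns four vectors, each stored through
// the result pointer at 16-byte offsets.
void unpack(vec_t out[4], __vector_quad *acc) {
  __builtin_mma_disassemble_acc(out, acc);
}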
@@ -14608,6 +15126,22 @@ Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
llvm::MDNode::get(CGF.getLLVMContext(), None));
return LD;
}
+
+// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
+Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
+ const unsigned XOffset = 12;
+ auto *DP = EmitAMDGPUDispatchPtr(CGF);
+ // Indexing the HSA kernel_dispatch_packet struct.
+ auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
+ auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
+ auto *DstTy =
+ CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
+ auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
+ auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
+ LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(CGF.getLLVMContext(), None));
+ return LD;
+}
} // namespace
// For processing memory ordering and memory scope arguments of various
@@ -14811,6 +15345,32 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
return Builder.CreateCall(F, { Src0, Builder.getFalse() });
}
+ case AMDGPU::BI__builtin_amdgcn_ds_faddf:
+ case AMDGPU::BI__builtin_amdgcn_ds_fminf:
+ case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
+ Intrinsic::ID Intrin;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_ds_faddf:
+ Intrin = Intrinsic::amdgcn_ds_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_fminf:
+ Intrin = Intrinsic::amdgcn_ds_fmin;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
+ Intrin = Intrinsic::amdgcn_ds_fmax;
+ break;
+ }
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+ llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
+ llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
+ llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
+ llvm::FunctionType *FTy = F->getFunctionType();
+ llvm::Type *PTy = FTy->getParamType(0);
+ Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
+ return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
+ }
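A hedged usage sketch for the LDS builtins handled above (assuming an amdgcn target; the address_space(3) qualifier stands in for OpenCL __local, and the trailing operands are the memory ordering, scope, and volatility):

typedef __attribute__((address_space(3))) float lds_float;

float lds_add(lds_float *p, float v) {
  // Lowered to llvm.amdgcn.ds.fadd; the pointer operand is address-space
  // cast to match the intrinsic's expected parameter type, as above.
  return __builtin_amdgcn_ds_faddf(p, v, 0, 0, false);
}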
case AMDGPU::BI__builtin_amdgcn_read_exec: {
CallInst *CI = cast<CallInst>(
EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
@@ -14842,6 +15402,14 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
return EmitAMDGPUWorkGroupSize(*this, 2);
+ // amdgcn grid size
+ case AMDGPU::BI__builtin_amdgcn_grid_size_x:
+ return EmitAMDGPUGridSize(*this, 0);
+ case AMDGPU::BI__builtin_amdgcn_grid_size_y:
+ return EmitAMDGPUGridSize(*this, 1);
+ case AMDGPU::BI__builtin_amdgcn_grid_size_z:
+ return EmitAMDGPUGridSize(*this, 2);
+
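A hedged device-side sketch of the new builtins (assuming an amdgcn compilation): each one is an invariant 32-bit load from the HSA kernel_dispatch_packet at byte offset 12 + 4 * Index, per EmitAMDGPUGridSize above.

__attribute__((device)) unsigned grid_threads_x() {
  return __builtin_amdgcn_grid_size_x(); // packet offset 12
}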
// r600 intrinsics
case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
@@ -15089,11 +15657,8 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
// Constant-fold the M4 and M5 mask arguments.
- llvm::APSInt M4, M5;
- bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
- bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
- assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
- (void)IsConstM4; (void)IsConstM5;
+ llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
+ llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some combinations of M4 and M5.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -15148,10 +15713,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
- llvm::APSInt M4;
- bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
- assert(IsConstM4 && "Constant arg isn't actually constant?");
- (void)IsConstM4;
+ llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -15185,10 +15747,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
- llvm::APSInt M4;
- bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
- assert(IsConstM4 && "Constant arg isn't actually constant?");
- (void)IsConstM4;
+ llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -15855,10 +16414,11 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
- llvm::APSInt isColMajorArg;
- if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
+ Optional<llvm::APSInt> isColMajorArg =
+ E->getArg(3)->getIntegerConstantExpr(getContext());
+ if (!isColMajorArg)
return nullptr;
- bool isColMajor = isColMajorArg.getSExtValue();
+ bool isColMajor = isColMajorArg->getSExtValue();
NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
unsigned IID = isColMajor ? II.IID_col : II.IID_row;
if (IID == 0)
@@ -15899,10 +16459,11 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Value *Dst = EmitScalarExpr(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
- llvm::APSInt isColMajorArg;
- if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
+ Optional<llvm::APSInt> isColMajorArg =
+ E->getArg(3)->getIntegerConstantExpr(getContext());
+ if (!isColMajorArg)
return nullptr;
- bool isColMajor = isColMajorArg.getSExtValue();
+ bool isColMajor = isColMajorArg->getSExtValue();
NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
unsigned IID = isColMajor ? II.IID_col : II.IID_row;
if (IID == 0)
@@ -15949,16 +16510,20 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Address SrcA = EmitPointerWithAlignment(E->getArg(1));
Address SrcB = EmitPointerWithAlignment(E->getArg(2));
Address SrcC = EmitPointerWithAlignment(E->getArg(3));
- llvm::APSInt LayoutArg;
- if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext()))
+ Optional<llvm::APSInt> LayoutArg =
+ E->getArg(4)->getIntegerConstantExpr(getContext());
+ if (!LayoutArg)
return nullptr;
- int Layout = LayoutArg.getSExtValue();
+ int Layout = LayoutArg->getSExtValue();
if (Layout < 0 || Layout > 3)
return nullptr;
llvm::APSInt SatfArg;
if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
      SatfArg = 0; // .b1 does not have a satf argument.
- else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
+ else if (Optional<llvm::APSInt> OptSatfArg =
+ E->getArg(5)->getIntegerConstantExpr(getContext()))
+ SatfArg = *OptSatfArg;
+ else
return nullptr;
bool Satf = SatfArg.getSExtValue();
NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
@@ -16106,16 +16671,16 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_memory_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *I = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
return Builder.CreateCall(Callee, I);
}
case WebAssembly::BI__builtin_wasm_memory_grow: {
llvm::Type *ResultType = ConvertType(E->getType());
- Value *Args[] = {
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1))
- };
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
+ Value *Args[] = {EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1))};
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
case WebAssembly::BI__builtin_wasm_tls_size: {
@@ -16138,28 +16703,28 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
return Builder.CreateCall(Callee, {Tag, Obj});
}
- case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
+ case WebAssembly::BI__builtin_wasm_rethrow: {
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
return Builder.CreateCall(Callee);
}
- case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
+ case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Expected = EmitScalarExpr(E->getArg(1));
Value *Timeout = EmitScalarExpr(E->getArg(2));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
- case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
+ case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Expected = EmitScalarExpr(E->getArg(1));
Value *Timeout = EmitScalarExpr(E->getArg(2));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
- case WebAssembly::BI__builtin_wasm_atomic_notify: {
+ case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Count = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
return Builder.CreateCall(Callee, {Addr, Count});
}
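A hedged sketch of the renamed builtins (assuming a wasm32 target with -matomics): wait32 blocks until *addr no longer equals expected or the nanosecond timeout expires, and notify wakes up to count waiters.

int wait32(int *addr, int expected, long long timeout_ns) {
  return __builtin_wasm_memory_atomic_wait32(addr, expected, timeout_ns);
}

unsigned notify(int *addr, unsigned count) {
  return __builtin_wasm_memory_atomic_notify(addr, count);
}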
case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
@@ -16190,7 +16755,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
- {ResT, Src->getType()});
+ {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
@@ -16201,7 +16766,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
- {ResT, Src->getType()});
+ {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_min_f32:
@@ -16210,8 +16775,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_min_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
- ConvertType(E->getType()));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_max_f32:
@@ -16220,8 +16785,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_max_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
- ConvertType(E->getType()));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_pmin_f32x4:
@@ -16287,9 +16852,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
- llvm::APSInt LaneConst;
- if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
+ llvm::APSInt LaneConst =
+ *E->getArg(1)->getIntegerConstantExpr(getContext());
Value *Vec = EmitScalarExpr(E->getArg(0));
Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
Value *Extract = Builder.CreateExtractElement(Vec, Lane);
@@ -16315,9 +16879,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
- llvm::APSInt LaneConst;
- if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
+ llvm::APSInt LaneConst =
+ *E->getArg(1)->getIntegerConstantExpr(getContext());
Value *Vec = EmitScalarExpr(E->getArg(0));
Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
Value *Val = EmitScalarExpr(E->getArg(2));
@@ -16430,12 +16993,95 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_q15mulr_saturate_s_i16x8: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_q15mulr_saturate_signed);
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
+ IntNo = Intrinsic::wasm_extmul_low_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
+ IntNo = Intrinsic::wasm_extmul_low_unsigned;
+ break;
+ case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
+ IntNo = Intrinsic::wasm_extmul_high_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
+ case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2:
+ IntNo = Intrinsic::wasm_extmul_high_unsigned;
+ break;
+ default:
+ llvm_unreachable("unexptected builtin ID");
+ }
+
+ Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
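A hedged sketch of one builtin from the group above (assuming wasm32 with -msimd128; vector types are spelled with the GCC vector extension): the low eight i8 lanes of each operand are sign-extended and multiplied into i16 lanes.

typedef signed char i8x16 __attribute__((vector_size(16)));
typedef short i16x8 __attribute__((vector_size(16)));

i16x8 extmul_low(i8x16 a, i8x16 b) {
  return __builtin_wasm_extmul_low_i8x16_s_i16x8(a, b);
}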
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
+ IntNo = Intrinsic::wasm_extadd_pairwise_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
+ IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
+ break;
+ default:
+ llvm_unreachable("unexptected builtin ID");
+ }
+
+ Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, Vec);
+ }
case WebAssembly::BI__builtin_wasm_bitselect: {
Value *V1 = EmitScalarExpr(E->getArg(0));
Value *V2 = EmitScalarExpr(E->getArg(1));
Value *C = EmitScalarExpr(E->getArg(2));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
- ConvertType(E->getType()));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {V1, V2, C});
+ }
+ case WebAssembly::BI__builtin_wasm_signselect_i8x16:
+ case WebAssembly::BI__builtin_wasm_signselect_i16x8:
+ case WebAssembly::BI__builtin_wasm_signselect_i32x4:
+ case WebAssembly::BI__builtin_wasm_signselect_i64x2: {
+ Value *V1 = EmitScalarExpr(E->getArg(0));
+ Value *V2 = EmitScalarExpr(E->getArg(1));
+ Value *C = EmitScalarExpr(E->getArg(2));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_signselect, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {V1, V2, C});
}
case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
@@ -16444,6 +17090,17 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_popcnt);
+ return Builder.CreateCall(Callee, {Vec});
+ }
+ case WebAssembly::BI__builtin_wasm_eq_i64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_eq);
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
case WebAssembly::BI__builtin_wasm_any_true_i8x16:
case WebAssembly::BI__builtin_wasm_any_true_i16x8:
case WebAssembly::BI__builtin_wasm_any_true_i32x4:
@@ -16475,7 +17132,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
- case WebAssembly::BI__builtin_wasm_bitmask_i32x4: {
+ case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
+ case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
@@ -16539,39 +17197,124 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
return Builder.CreateCall(Callee, {Low, High});
}
- case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
- case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
- case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
- case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8: {
+ case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
+ case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
+ case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
+ case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
+ case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_low_signed;
break;
- case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
+ case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_high_signed;
break;
- case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
+ case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_low_unsigned;
break;
- case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
- case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8:
+ case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_high_unsigned;
break;
+ }
+ Function *Callee = CGM.getIntrinsic(IntNo);
+ return Builder.CreateCall(Callee, Vec);
+ }
+ case WebAssembly::BI__builtin_wasm_convert_low_s_i32x4_f64x2:
+ case WebAssembly::BI__builtin_wasm_convert_low_u_i32x4_f64x2: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_convert_low_s_i32x4_f64x2:
+ IntNo = Intrinsic::wasm_convert_low_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_convert_low_u_i32x4_f64x2:
+ IntNo = Intrinsic::wasm_convert_low_unsigned;
+ break;
+ }
+ Function *Callee = CGM.getIntrinsic(IntNo);
+ return Builder.CreateCall(Callee, Vec);
+ }
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
+ IntNo = Intrinsic::wasm_trunc_saturate_zero_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4:
+ IntNo = Intrinsic::wasm_trunc_saturate_zero_unsigned;
+ break;
+ }
+ Function *Callee = CGM.getIntrinsic(IntNo);
+ return Builder.CreateCall(Callee, Vec);
+ }
+ case WebAssembly::BI__builtin_wasm_demote_zero_f64x2_f32x4: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_demote_zero);
+ return Builder.CreateCall(Callee, Vec);
+ }
+ case WebAssembly::BI__builtin_wasm_promote_low_f32x4_f64x2: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_promote_low);
+ return Builder.CreateCall(Callee, Vec);
+ }
+ case WebAssembly::BI__builtin_wasm_load32_zero: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
+ return Builder.CreateCall(Callee, {Ptr});
+ }
+ case WebAssembly::BI__builtin_wasm_load64_zero: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
+ return Builder.CreateCall(Callee, {Ptr});
+ }
+ case WebAssembly::BI__builtin_wasm_load8_lane:
+ case WebAssembly::BI__builtin_wasm_load16_lane:
+ case WebAssembly::BI__builtin_wasm_load32_lane:
+ case WebAssembly::BI__builtin_wasm_load64_lane:
+ case WebAssembly::BI__builtin_wasm_store8_lane:
+ case WebAssembly::BI__builtin_wasm_store16_lane:
+ case WebAssembly::BI__builtin_wasm_store32_lane:
+ case WebAssembly::BI__builtin_wasm_store64_lane: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Vec = EmitScalarExpr(E->getArg(1));
+ Optional<llvm::APSInt> LaneIdxConst =
+ E->getArg(2)->getIntegerConstantExpr(getContext());
+ assert(LaneIdxConst && "Constant arg isn't actually constant?");
+ Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_load8_lane:
+ IntNo = Intrinsic::wasm_load8_lane;
+ break;
+ case WebAssembly::BI__builtin_wasm_load16_lane:
+ IntNo = Intrinsic::wasm_load16_lane;
+ break;
+ case WebAssembly::BI__builtin_wasm_load32_lane:
+ IntNo = Intrinsic::wasm_load32_lane;
+ break;
+ case WebAssembly::BI__builtin_wasm_load64_lane:
+ IntNo = Intrinsic::wasm_load64_lane;
+ break;
+ case WebAssembly::BI__builtin_wasm_store8_lane:
+ IntNo = Intrinsic::wasm_store8_lane;
+ break;
+ case WebAssembly::BI__builtin_wasm_store16_lane:
+ IntNo = Intrinsic::wasm_store16_lane;
+ break;
+ case WebAssembly::BI__builtin_wasm_store32_lane:
+ IntNo = Intrinsic::wasm_store32_lane;
+ break;
+ case WebAssembly::BI__builtin_wasm_store64_lane:
+ IntNo = Intrinsic::wasm_store64_lane;
+ break;
default:
llvm_unreachable("unexpected builtin ID");
}
- Function *Callee =
- CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Vec->getType()});
- return Builder.CreateCall(Callee, Vec);
+ Function *Callee = CGM.getIntrinsic(IntNo);
+ return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
}
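A hedged sketch of the lane builtins above (wasm32 with -msimd128): the lane index must be an integer constant expression, which is why the lowering asserts on getIntegerConstantExpr rather than handling a failure.

typedef signed char i8x16 __attribute__((vector_size(16)));

i8x16 load_into_lane0(signed char *p, i8x16 v) {
  return __builtin_wasm_load8_lane(p, v, 0); // index must be a constant
}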
case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
Value *Ops[18];
@@ -16579,14 +17322,24 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
while (OpIdx < 18) {
- llvm::APSInt LaneConst;
- if (!E->getArg(OpIdx)->isIntegerConstantExpr(LaneConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
+ Optional<llvm::APSInt> LaneConst =
+ E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
+ assert(LaneConst && "Constant arg isn't actually constant?");
+ Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
}
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
return Builder.CreateCall(Callee, Ops);
}
+ case WebAssembly::BI__builtin_wasm_prefetch_t: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_t);
+ return Builder.CreateCall(Callee, Ptr);
+ }
+ case WebAssembly::BI__builtin_wasm_prefetch_nt: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_nt);
+ return Builder.CreateCall(Callee, Ptr);
+ }
default:
return nullptr;
}
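A hedged sketch of the shuffle case above (wasm32 with -msimd128): all sixteen lane indices must be constants, matching the assert in the loop that builds Ops.

typedef signed char i8x16 __attribute__((vector_size(16)));

i8x16 reverse_bytes(i8x16 a, i8x16 b) {
  return __builtin_wasm_shuffle_v8x16(a, b, 15, 14, 13, 12, 11, 10, 9, 8, 7,
                                      6, 5, 4, 3, 2, 1, 0);
}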
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index baf2c79cc2b6..33a2d6f4483e 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -21,6 +21,7 @@
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/ReplaceConstant.h"
#include "llvm/Support/Format.h"
using namespace clang;
@@ -128,13 +129,15 @@ public:
DeviceVars.push_back({&Var,
VD,
{DeviceVarFlags::Variable, Extern, Constant,
- /*Normalized*/ false, /*Type*/ 0}});
+ VD->hasAttr<HIPManagedAttr>(),
+ /*Normalized*/ false, 0}});
}
void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
bool Extern, int Type) override {
DeviceVars.push_back({&Var,
VD,
{DeviceVarFlags::Surface, Extern, /*Constant*/ false,
+ /*Managed*/ false,
/*Normalized*/ false, Type}});
}
void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
@@ -142,7 +145,7 @@ public:
DeviceVars.push_back({&Var,
VD,
{DeviceVarFlags::Texture, Extern, /*Constant*/ false,
- Normalized, Type}});
+ /*Managed*/ false, Normalized, Type}});
}
/// Creates module constructor function
@@ -354,14 +357,12 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
CharUnits Offset = CharUnits::Zero();
for (const VarDecl *A : Args) {
- CharUnits TyWidth, TyAlign;
- std::tie(TyWidth, TyAlign) =
- CGM.getContext().getTypeInfoInChars(A->getType());
- Offset = Offset.alignTo(TyAlign);
+ auto TInfo = CGM.getContext().getTypeInfoInChars(A->getType());
+ Offset = Offset.alignTo(TInfo.Align);
llvm::Value *Args[] = {
CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
VoidPtrTy),
- llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
+ llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()),
llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
};
llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
@@ -370,7 +371,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
CGF.EmitBlock(NextBlock);
- Offset += TyWidth;
+ Offset += TInfo.Width;
}
// Emit the call to cudaLaunch
@@ -382,6 +383,47 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
CGF.EmitBlock(EndBlock);
}
+// Replace the original variable Var with the address loaded from the
+// variable ManagedVar, which is populated by the HIP runtime.
+static void replaceManagedVar(llvm::GlobalVariable *Var,
+ llvm::GlobalVariable *ManagedVar) {
+ SmallVector<SmallVector<llvm::User *, 8>, 8> WorkList;
+ for (auto &&VarUse : Var->uses()) {
+ WorkList.push_back({VarUse.getUser()});
+ }
+ while (!WorkList.empty()) {
+ auto &&WorkItem = WorkList.pop_back_val();
+ auto *U = WorkItem.back();
+ if (isa<llvm::ConstantExpr>(U)) {
+ for (auto &&UU : U->uses()) {
+ WorkItem.push_back(UU.getUser());
+ WorkList.push_back(WorkItem);
+ WorkItem.pop_back();
+ }
+ continue;
+ }
+ if (auto *I = dyn_cast<llvm::Instruction>(U)) {
+ llvm::Value *OldV = Var;
+ llvm::Instruction *NewV =
+ new llvm::LoadInst(Var->getType(), ManagedVar, "ld.managed", false,
+ llvm::Align(Var->getAlignment()), I);
+ WorkItem.pop_back();
+ // Replace constant expressions directly or indirectly using the managed
+ // variable with instructions.
+ for (auto &&Op : WorkItem) {
+ auto *CE = cast<llvm::ConstantExpr>(Op);
+ auto *NewInst = llvm::createReplacementInstr(CE, I);
+ NewInst->replaceUsesOfWith(OldV, NewV);
+ OldV = CE;
+ NewV = NewInst;
+ }
+ I->replaceUsesOfWith(OldV, NewV);
+ } else {
+ llvm_unreachable("Invalid use of managed variable");
+ }
+ }
+}
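A hedged HIP source-level illustration of what replaceManagedVar enables (the variable names are illustrative): every use of a __managed__ variable is rewritten to go through the pointer the runtime fills in at registration time.

__managed__ int counter; // emitted as counter plus counter.managed

__global__ void bump() {
  ++counter; // becomes an ld.managed load of counter.managed, then the add
}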
+
/// Creates a function that sets up state on the host side for CUDA objects that
/// have a presence on both the host and device sides. Specifically, registers
/// the host side of kernel functions and device global variables with the CUDA
@@ -454,6 +496,13 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, RegisterVarParams, false),
addUnderscoredPrefixToName("RegisterVar"));
+ // void __hipRegisterManagedVar(void **, char *, char *, const char *,
+ // size_t, unsigned)
+ llvm::Type *RegisterManagedVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
+ CharPtrTy, VarSizeTy, IntTy};
+ llvm::FunctionCallee RegisterManagedVar = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(VoidTy, RegisterManagedVarParams, false),
+ addUnderscoredPrefixToName("RegisterManagedVar"));
// void __cudaRegisterSurface(void **, const struct surfaceReference *,
// const void **, const char *, int, int);
llvm::FunctionCallee RegisterSurf = CGM.CreateRuntimeFunction(
@@ -476,16 +525,34 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
case DeviceVarFlags::Variable: {
uint64_t VarSize =
CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
- llvm::Value *Args[] = {
- &GpuBinaryHandlePtr,
- Builder.CreateBitCast(Var, VoidPtrTy),
- VarName,
- VarName,
- llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
- llvm::ConstantInt::get(VarSizeTy, VarSize),
- llvm::ConstantInt::get(IntTy, Info.Flags.isConstant()),
- llvm::ConstantInt::get(IntTy, 0)};
- Builder.CreateCall(RegisterVar, Args);
+ if (Info.Flags.isManaged()) {
+ auto ManagedVar = new llvm::GlobalVariable(
+ CGM.getModule(), Var->getType(),
+ /*isConstant=*/false, Var->getLinkage(),
+ /*Init=*/llvm::ConstantPointerNull::get(Var->getType()),
+ Twine(Var->getName() + ".managed"), /*InsertBefore=*/nullptr,
+ llvm::GlobalVariable::NotThreadLocal);
+ replaceManagedVar(Var, ManagedVar);
+ llvm::Value *Args[] = {
+ &GpuBinaryHandlePtr,
+ Builder.CreateBitCast(ManagedVar, VoidPtrTy),
+ Builder.CreateBitCast(Var, VoidPtrTy),
+ VarName,
+ llvm::ConstantInt::get(VarSizeTy, VarSize),
+ llvm::ConstantInt::get(IntTy, Var->getAlignment())};
+ Builder.CreateCall(RegisterManagedVar, Args);
+ } else {
+ llvm::Value *Args[] = {
+ &GpuBinaryHandlePtr,
+ Builder.CreateBitCast(Var, VoidPtrTy),
+ VarName,
+ VarName,
+ llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
+ llvm::ConstantInt::get(VarSizeTy, VarSize),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isConstant()),
+ llvm::ConstantInt::get(IntTy, 0)};
+ Builder.CreateCall(RegisterVar, Args);
+ }
break;
}
case DeviceVarFlags::Surface:
@@ -597,8 +664,10 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
if (CudaGpuBinary) {
// If fatbin is available from early finalization, create a string
// literal containing the fat binary loaded from the given file.
- FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()),
- "", FatbinConstantName, 8);
+ const unsigned HIPCodeObjectAlign = 4096;
+ FatBinStr =
+ makeConstantString(std::string(CudaGpuBinary->getBuffer()), "",
+ FatbinConstantName, HIPCodeObjectAlign);
} else {
// If fatbin is not available, create an external symbol
// __hip_fatbin in section .hip_fatbin. The external symbol is supposed
diff --git a/clang/lib/CodeGen/CGCUDARuntime.h b/clang/lib/CodeGen/CGCUDARuntime.h
index 19e70a2022a5..ba3404ead368 100644
--- a/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/clang/lib/CodeGen/CGCUDARuntime.h
@@ -54,16 +54,19 @@ public:
unsigned Kind : 2;
unsigned Extern : 1;
unsigned Constant : 1; // Constant variable.
+ unsigned Managed : 1; // Managed variable.
unsigned Normalized : 1; // Normalized texture.
 int SurfTexType; // Type of surface/texture.
public:
- DeviceVarFlags(DeviceVarKind K, bool E, bool C, bool N, int T)
- : Kind(K), Extern(E), Constant(C), Normalized(N), SurfTexType(T) {}
+ DeviceVarFlags(DeviceVarKind K, bool E, bool C, bool M, bool N, int T)
+ : Kind(K), Extern(E), Constant(C), Managed(M), Normalized(N),
+ SurfTexType(T) {}
DeviceVarKind getKind() const { return static_cast<DeviceVarKind>(Kind); }
bool isExtern() const { return Extern; }
bool isConstant() const { return Constant; }
+ bool isManaged() const { return Managed; }
bool isNormalized() const { return Normalized; }
int getSurfTexType() const { return SurfTexType; }
};
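A call-site sketch matching the widened constructor (the values are illustrative): the new Managed bit slots between Constant and Normalized, which is why every registration path in CGCUDANV.cpp above gained one extra argument.

DeviceVarFlags F(DeviceVarFlags::Variable, /*Extern=*/false,
                 /*Constant=*/false, /*Managed=*/true,
                 /*Normalized=*/false, /*SurfTexType=*/0);
// F.isManaged() == true; F.getSurfTexType() == 0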
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
index 65327a2435b5..9714730e3c4b 100644
--- a/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -135,8 +135,8 @@ void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
// down to whether we know it's a complete object or not.
auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent());
if (MD->getParent()->getNumVBases() == 0 || // avoid vcall in common case
- MD->getParent()->hasAttr<FinalAttr>() ||
- !isThisCompleteObject(CGF.CurGD)) {
+ MD->getParent()->isEffectivelyFinal() ||
+ isThisCompleteObject(CGF.CurGD)) {
CGF.CXXABIThisAlignment = Layout.getAlignment();
} else {
CGF.CXXABIThisAlignment = Layout.getNonVirtualAlignment();
@@ -251,28 +251,6 @@ llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
E->path_end());
}
-CharUnits CGCXXABI::getMemberPointerPathAdjustment(const APValue &MP) {
- // TODO: Store base specifiers in APValue member pointer paths so we can
- // easily reuse CGM.GetNonVirtualBaseClassOffset().
- const ValueDecl *MPD = MP.getMemberPointerDecl();
- CharUnits ThisAdjustment = CharUnits::Zero();
- ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
- bool DerivedMember = MP.isMemberPointerToDerivedMember();
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
- for (unsigned I = 0, N = Path.size(); I != N; ++I) {
- const CXXRecordDecl *Base = RD;
- const CXXRecordDecl *Derived = Path[I];
- if (DerivedMember)
- std::swap(Base, Derived);
- ThisAdjustment +=
- getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
- RD = Path[I];
- }
- if (DerivedMember)
- ThisAdjustment = -ThisAdjustment;
- return ThisAdjustment;
-}
-
llvm::BasicBlock *
CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h
index f5b3fc13bbbd..ea839db7528e 100644
--- a/clang/lib/CodeGen/CGCXXABI.h
+++ b/clang/lib/CodeGen/CGCXXABI.h
@@ -146,6 +146,13 @@ public:
/// 'this' parameter of C++ instance methods.
virtual bool isSRetParameterAfterThis() const { return false; }
+ /// Returns true if the ABI permits the argument to be a homogeneous
+ /// aggregate.
+ virtual bool
+ isPermittedToBeHomogeneousAggregate(const CXXRecordDecl *RD) const {
+ return true;
+ };
+
/// Find the LLVM type used to represent the given member pointer
/// type.
virtual llvm::Type *
@@ -220,12 +227,6 @@ protected:
/// is required.
llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
- /// Computes the non-virtual adjustment needed for a member pointer
- /// conversion along an inheritance path stored in an APValue. Unlike
- /// getMemberPointerAdjustment(), the adjustment can be negative if the path
- /// is from a derived type to a base type.
- CharUnits getMemberPointerPathAdjustment(const APValue &MP);
-
public:
virtual void emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index e8235c775d8f..42801372189b 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -31,6 +31,7 @@
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
@@ -198,7 +199,8 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
FTP);
}
-static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
+static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
+ bool IsWindows) {
// Set the appropriate calling convention for the Function.
if (D->hasAttr<StdCallAttr>())
return CC_X86StdCall;
@@ -1119,12 +1121,13 @@ void CodeGenFunction::ExpandTypeToArgs(
/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
- CharUnits MinAlign) {
+ CharUnits MinAlign,
+ const Twine &Name = "tmp") {
// Don't use an alignment that's worse than what LLVM would prefer.
auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
- return CGF.CreateTempAlloca(Ty, Align);
+ return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
@@ -1230,14 +1233,15 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
if (SrcTy == Ty)
return CGF.Builder.CreateLoad(Src);
- uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
+ llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
- Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
+ Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
+ DstSize.getFixedSize(), CGF);
SrcTy = Src.getElementType();
}
- uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
// If the source and destination are integer or pointer types, just do an
// extension or truncation to the desired type.
@@ -1248,7 +1252,8 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
}
// If load is legal, just bitcast the src pointer.
- if (SrcSize >= DstSize) {
+ if (!SrcSize.isScalable() && !DstSize.isScalable() &&
+ SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
// Generally SrcSize is never greater than DstSize, since this means we are
// losing bits. However, this can happen in cases where the structure has
// additional padding, for example due to a user specified alignment.
@@ -1260,11 +1265,28 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
return CGF.Builder.CreateLoad(Src);
}
+ // If coercing a fixed vector to a scalable vector for ABI compatibility, and
+ // the types match, use the llvm.experimental.vector.insert intrinsic to
+ // perform the conversion.
+ if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
+ if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+ if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
+ auto *Load = CGF.Builder.CreateLoad(Src);
+ auto *UndefVec = llvm::UndefValue::get(ScalableDst);
+ auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
+ return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
+ "castScalableSve");
+ }
+ }
+ }
+
// Otherwise do coercion through memory. This is stupid, but simple.
- Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
- CGF.Builder.CreateMemCpy(Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
- Src.getPointer(), Src.getAlignment().getAsAlign(),
- llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize));
+ Address Tmp =
+ CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
+ CGF.Builder.CreateMemCpy(
+ Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
+ Src.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
return CGF.Builder.CreateLoad(Tmp);
}
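A hedged source-level trigger for the new scalable-vector path (assuming AArch64 with -msve-vector-bits=512): passing a fixed-length SVE type where the sizeless type is expected makes CreateCoercedLoad emit llvm.experimental.vector.insert instead of a round-trip through memory.

#include <arm_sve.h>

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));

svint32_t callee(svint32_t v); // takes the scalable (VLAT) type

svint32_t caller(fixed_int32_t v) {
  return callee(v); // fixed vector coerced to scalable at the boundary
}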
@@ -1303,10 +1325,11 @@ static void CreateCoercedStore(llvm::Value *Src,
return;
}
- uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
- Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
+ Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
+ SrcSize.getFixedSize(), CGF);
DstTy = Dst.getElementType();
}
@@ -1328,10 +1351,12 @@ static void CreateCoercedStore(llvm::Value *Src,
return;
}
- uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
+ llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
- if (SrcSize <= DstSize) {
+ if (isa<llvm::ScalableVectorType>(SrcTy) ||
+ isa<llvm::ScalableVectorType>(DstTy) ||
+ SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
} else {
@@ -1346,9 +1371,10 @@ static void CreateCoercedStore(llvm::Value *Src,
// to that information.
Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
CGF.Builder.CreateStore(Src, Tmp);
- CGF.Builder.CreateMemCpy(Dst.getPointer(), Dst.getAlignment().getAsAlign(),
- Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
- llvm::ConstantInt::get(CGF.IntPtrTy, DstSize));
+ CGF.Builder.CreateMemCpy(
+ Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
+ Tmp.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
}
}
@@ -1470,6 +1496,7 @@ void ClangToLLVMArgMapping::construct(const ASTContext &Context,
break;
}
case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased:
IRArgs.NumberOfArgs = 1;
break;
case ABIArgInfo::Ignore:
@@ -1560,6 +1587,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
const ABIArgInfo &retAI = FI.getReturnInfo();
switch (retAI.getKind()) {
case ABIArgInfo::Expand:
+ case ABIArgInfo::IndirectAliased:
llvm_unreachable("Invalid ABI kind for return argument");
case ABIArgInfo::Extend:
@@ -1637,7 +1665,12 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
CGM.getDataLayout().getAllocaAddrSpace());
break;
}
-
+ case ABIArgInfo::IndirectAliased: {
+ assert(NumIRArgs == 1);
+ llvm::Type *LTy = ConvertTypeForMem(it->type);
+ ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
+ break;
+ }
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// Fast-isel and the optimizer generally like scalar values better than
@@ -1778,9 +1811,6 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
llvm::utostr(CodeGenOpts.SSPBufferSize));
FuncAttrs.addAttribute("no-signed-zeros-fp-math",
llvm::toStringRef(LangOpts.NoSignedZero));
- FuncAttrs.addAttribute(
- "correctly-rounded-divide-sqrt-fp-math",
- llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
// TODO: Reciprocal estimate codegen options should apply to instructions?
const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
@@ -1929,6 +1959,8 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
if (TargetDecl->hasAttr<ColdAttr>())
FuncAttrs.addAttribute(llvm::Attribute::Cold);
+ if (TargetDecl->hasAttr<HotAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::Hot);
if (TargetDecl->hasAttr<NoDuplicateAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
if (TargetDecl->hasAttr<ConvergentAttr>())
@@ -1953,6 +1985,10 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
NBA = Fn->getAttr<NoBuiltinAttr>();
}
+ // Only place nomerge attribute on call sites, never functions. This
+ // allows it to work on indirect virtual function calls.
+ if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
}
// 'const', 'pure' and 'noalias' attributed functions are also nounwind.
@@ -1975,6 +2011,8 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute("no_caller_saved_registers");
if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
+ if (TargetDecl->hasAttr<LeafAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
@@ -1999,6 +2037,18 @@ void CodeGenModule::ConstructAttributeList(
llvm::toStringRef(CodeGenOpts.UniformWGSize));
}
}
+
+ std::string AssumptionValueStr;
+ for (AssumptionAttr *AssumptionA :
+ TargetDecl->specific_attrs<AssumptionAttr>()) {
+ std::string AS = AssumptionA->getAssumption().str();
+ if (!AS.empty() && !AssumptionValueStr.empty())
+ AssumptionValueStr += ",";
+ AssumptionValueStr += AS;
+ }
+
+ if (!AssumptionValueStr.empty())
+ FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
}
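A hedged sketch of the attribute merging just above (the assumption strings are illustrative): multiple assume attributes on a declaration are joined into one comma-separated "llvm.assume" IR attribute.

__attribute__((assume("omp_no_openmp")))
__attribute__((assume("illustrative_extra_assumption")))
void kernel();
// IR: attributes #N = { ... "llvm.assume"="omp_no_openmp,illustrative_extra_assumption" }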
// Attach "no-builtins" attributes to:
@@ -2101,6 +2151,7 @@ void CodeGenModule::ConstructAttributeList(
break;
case ABIArgInfo::Expand:
+ case ABIArgInfo::IndirectAliased:
llvm_unreachable("Invalid ABI kind for return argument");
}
@@ -2125,7 +2176,7 @@ void CodeGenModule::ConstructAttributeList(
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
llvm::AttrBuilder SRETAttrs;
- SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
@@ -2142,6 +2193,36 @@ void CodeGenModule::ConstructAttributeList(
llvm::AttributeSet::get(getLLVMContext(), Attrs);
}
+ // Apply `nonnull` and `dereferencable(N)` to the `this` argument.
+ if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
+ !FI.arg_begin()->type->isVoidPointerType()) {
+ auto IRArgs = IRFunctionArgs.getIRArgs(0);
+
+ assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
+
+ llvm::AttrBuilder Attrs;
+
+ if (!CodeGenOpts.NullPointerIsValid &&
+ getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
+ Attrs.addAttribute(llvm::Attribute::NonNull);
+ Attrs.addDereferenceableAttr(
+ getMinimumObjectSize(
+ FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
+ .getQuantity());
+ } else {
+ // FIXME dereferenceable should be correct here, regardless of
+ // NullPointerIsValid. However, dereferenceable currently does not always
+ // respect NullPointerIsValid and may imply nonnull and break the program.
+ // See https://reviews.llvm.org/D66618 for discussions.
+ Attrs.addDereferenceableOrNullAttr(
+ getMinimumObjectSize(
+ FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
+ .getQuantity());
+ }
+
+ ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
+ }
+
unsigned ArgNo = 0;
for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
E = FI.arg_end();
@@ -2184,6 +2265,16 @@ void CodeGenModule::ConstructAttributeList(
if (AI.getIndirectByVal())
Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
+ auto *Decl = ParamType->getAsRecordDecl();
+ if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
+ Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
+ // When calling the function, the pointer passed in will be the only
+ // reference to the underlying object. Mark it accordingly.
+ Attrs.addAttribute(llvm::Attribute::NoAlias);
+
+ // TODO: We could add the byref attribute if not byval, but it would
+ // require updating many testcases.
+
CharUnits Align = AI.getIndirectAlign();
// In a byval argument, it is important that the required
@@ -2206,6 +2297,13 @@ void CodeGenModule::ConstructAttributeList(
// byval disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
+
+ break;
+ }
+ case ABIArgInfo::IndirectAliased: {
+ CharUnits Align = AI.getIndirectAlign();
+ Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
+ Attrs.addAlignmentAttr(Align.getQuantity());
break;
}
case ABIArgInfo::Ignore:
@@ -2243,7 +2341,7 @@ void CodeGenModule::ConstructAttributeList(
// Add 'sret' if we haven't already used it for something, but
// only if the result is void.
if (!hasUsedSRet && RetTy->isVoidType()) {
- Attrs.addAttribute(llvm::Attribute::StructRet);
+ Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
hasUsedSRet = true;
}
@@ -2254,8 +2352,8 @@ void CodeGenModule::ConstructAttributeList(
auto PTy = ParamType->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
auto info = getContext().getTypeInfoInChars(PTy);
- Attrs.addDereferenceableAttr(info.first.getQuantity());
- Attrs.addAlignmentAttr(info.second.getAsAlign());
+ Attrs.addDereferenceableAttr(info.Width.getQuantity());
+ Attrs.addAlignmentAttr(info.Align.getAsAlign());
}
break;
}
@@ -2434,16 +2532,19 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
break;
}
- case ABIArgInfo::Indirect: {
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
Address ParamAddr =
Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
if (!hasScalarEvaluationKind(Ty)) {
- // Aggregates and complex variables are accessed by reference. All we
- // need to do is realign the value, if requested.
+ // Aggregates and complex variables are accessed by reference. All we
+ // need to do is realign the value, if requested. Also, if the address
+ // may be aliased, copy it to ensure that the parameter variable is
+ // mutable and has a unique address, as C requires.
Address V = ParamAddr;
- if (ArgI.getIndirectRealign()) {
+ if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
Address AlignedTemp = CreateMemTemp(Ty, "coerce");
// Copy from the incoming argument pointer to the temporary with the
@@ -2499,6 +2600,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// bytes).
if (ArrTy->getSizeModifier() == ArrayType::Static) {
QualType ETy = ArrTy->getElementType();
+ llvm::Align Alignment =
+ CGM.getNaturalTypeAlignment(ETy).getAsAlign();
+ AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
uint64_t ArrSize = ArrTy->getSize().getZExtValue();
if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
ArrSize) {
@@ -2518,10 +2622,15 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// For C99 VLAs with the static keyword, we don't know the size so
// we can't use the dereferenceable attribute, but in addrspace(0)
// we know that it must be nonnull.
- if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
- !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
- !CGM.getCodeGenOpts().NullPointerIsValid)
- AI->addAttr(llvm::Attribute::NonNull);
+ if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
+ QualType ETy = ArrTy->getElementType();
+ llvm::Align Alignment =
+ CGM.getNaturalTypeAlignment(ETy).getAsAlign();
+ AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ if (!getContext().getTargetAddressSpace(ETy) &&
+ !CGM.getCodeGenOpts().NullPointerIsValid)
+ AI->addAttr(llvm::Attribute::NonNull);
+ }
}
// Set `align` attribute if any.
@@ -2596,6 +2705,27 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
break;
}
+ // VLST arguments are coerced to VLATs at the function boundary for
+ // ABI consistency. If this is a VLST that was coerced to
+ // a VLAT at the function boundary and the types match up, use
+ // llvm.experimental.vector.extract to convert back to the original
+ // VLST.
+ if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
+ auto *Coerced = Fn->getArg(FirstIRArg);
+ if (auto *VecTyFrom =
+ dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
+ if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
+
+ assert(NumIRArgs == 1);
+ Coerced->setName(Arg->getName() + ".coerce");
+ ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
+ VecTyTo, Coerced, Zero, "castFixedSve")));
+ break;
+ }
+ }
+ }
+
Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
Arg->getName());
@@ -3089,7 +3219,7 @@ llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
const llvm::DataLayout &DataLayout = CGM.getDataLayout();
int Size = DataLayout.getTypeStoreSize(ITy);
SmallVector<uint64_t, 4> Bits(Size);
- setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
+ setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
int CharWidth = CGM.getContext().getCharWidth();
uint64_t Mask =
@@ -3106,7 +3236,7 @@ llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
const llvm::DataLayout &DataLayout = CGM.getDataLayout();
int Size = DataLayout.getTypeStoreSize(ATy);
SmallVector<uint64_t, 16> Bits(Size);
- setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
+ setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
// Clear each element of the LLVM array.
int CharWidth = CGM.getContext().getCharWidth();
@@ -3285,8 +3415,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
}
break;
}
-
case ABIArgInfo::Expand:
+ case ABIArgInfo::IndirectAliased:
llvm_unreachable("Invalid ABI kind for return argument");
}
@@ -3738,10 +3868,7 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
}
SanitizerScope SanScope(this);
- assert(RV.isScalar());
- llvm::Value *V = RV.getScalarVal();
- llvm::Value *Cond =
- Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
+ llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
@@ -3749,13 +3876,107 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
}
+// Check if the call is going to use the inalloca convention. This needs to
+// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
+// later, so we can't check it directly.
+static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
+ ArrayRef<QualType> ArgTypes) {
+ // The Swift calling convention doesn't go through the target-specific
+ // argument classification, so it never uses inalloca.
+ // TODO: Consider limiting inalloca use to only calling conventions supported
+ // by MSVC.
+ if (ExplicitCC == CC_Swift)
+ return false;
+ if (!CGM.getTarget().getCXXABI().isMicrosoft())
+ return false;
+ return llvm::any_of(ArgTypes, [&](QualType Ty) {
+ return isInAllocaArgument(CGM.getCXXABI(), Ty);
+ });
+}
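A small sketch (hypothetical names, i686-windows-msvc target assumed) of the kind of argument this predicate detects:

  struct NonTrivial {
    NonTrivial(const NonTrivial &); // non-trivial copy constructor
    int x;
  };

  void callee(NonTrivial nt);
  // On 32-bit MSVC, 'nt' must be constructed directly in the argument area,
  // so the call uses the inalloca convention and EmitCallArgs inserts a
  // stack save before building the arguments.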
+
+#ifndef NDEBUG
+// Determine whether the given argument is an Objective-C method
+// that may have type parameters in its signature.
+static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
+ const DeclContext *dc = method->getDeclContext();
+ if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
+ return classDecl->getTypeParamListAsWritten();
+ }
+
+ if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
+ return catDecl->getTypeParamList();
+ }
+
+ return false;
+}
+#endif
+
+/// EmitCallArgs - Emit call arguments for a function.
void CodeGenFunction::EmitCallArgs(
- CallArgList &Args, ArrayRef<QualType> ArgTypes,
+ CallArgList &Args, PrototypeWrapper Prototype,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
+ SmallVector<QualType, 16> ArgTypes;
+
+ assert((ParamsToSkip == 0 || Prototype.P) &&
+ "Can't skip parameters if type info is not provided");
+
+ // This variable only captures *explicitly* written conventions, not those
+ // applied by default via command line flags or target defaults, such as
+ // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
+ // require knowing if this is a C++ instance method or being able to see
+ // unprototyped FunctionTypes.
+ CallingConv ExplicitCC = CC_C;
+
+ // First, if a prototype was provided, use those argument types.
+ bool IsVariadic = false;
+ if (Prototype.P) {
+ const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
+ if (MD) {
+ IsVariadic = MD->isVariadic();
+ ExplicitCC = getCallingConventionForDecl(
+ MD, CGM.getTarget().getTriple().isOSWindows());
+ ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
+ MD->param_type_end());
+ } else {
+ const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
+ IsVariadic = FPT->isVariadic();
+ ExplicitCC = FPT->getExtInfo().getCC();
+ ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
+ FPT->param_type_end());
+ }
+
+#ifndef NDEBUG
+ // Check that the prototyped types match the argument expression types.
+ bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
+ CallExpr::const_arg_iterator Arg = ArgRange.begin();
+ for (QualType Ty : ArgTypes) {
+ assert(Arg != ArgRange.end() && "Running over edge of argument list!");
+ assert(
+ (isGenericMethod || Ty->isVariablyModifiedType() ||
+ Ty.getNonReferenceType()->isObjCRetainableType() ||
+ getContext()
+ .getCanonicalType(Ty.getNonReferenceType())
+ .getTypePtr() ==
+ getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
+ "type mismatch in call argument!");
+ ++Arg;
+ }
+
+ // Either we've emitted all the call args, or we have a call to a variadic
+ // function.
+ assert((Arg == ArgRange.end() || IsVariadic) &&
+ "Extra arguments in non-variadic function!");
+#endif
+ }
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
+ ArgRange.end()))
+ ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
- // We *have* to evaluate arguments from right to left in the MS C++ ABI,
+ // We must evaluate arguments from right to left in the MS C++ ABI,
// because arguments are destroyed left to right in the callee. As a special
// case, there are certain language constructs that require left-to-right
// evaluation, and in those cases we consider the evaluation order requirement
@@ -3788,15 +4009,10 @@ void CodeGenFunction::EmitCallArgs(
};
// Insert a stack save if we're going to need any inalloca args.
- bool HasInAllocaArgs = false;
- if (CGM.getTarget().getCXXABI().isMicrosoft()) {
- for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
- I != E && !HasInAllocaArgs; ++I)
- HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
- if (HasInAllocaArgs) {
- assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
- Args.allocateArgumentMemory(*this);
- }
+ if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
+ assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
+ "inalloca only supported on x86");
+ Args.allocateArgumentMemory(*this);
}
// Evaluate each argument in the appropriate order.
@@ -4413,7 +4629,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
break;
}
- case ABIArgInfo::Indirect: {
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
if (!I->isAggregate()) {
// Make a temporary alloca to pass the argument.
@@ -4668,12 +4885,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
break;
}
- case ABIArgInfo::Expand:
+ case ABIArgInfo::Expand: {
unsigned IRArgPos = FirstIRArg;
ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
+ }
}
const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
@@ -4796,7 +5014,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
/*AttrOnCallSite=*/true);
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
- if (FD->usesFPIntrin())
+ if (FD->hasAttr<StrictFPAttr>())
// All calls within a strictfp function are marked strictfp
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
@@ -4805,8 +5023,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Add call-site nomerge attribute if exists.
if (InNoMergeAttributedStmt)
Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoMerge);
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoMerge);
// Apply some call-site-specific attributes.
// TODO: work this into building the attribute set.
@@ -4841,6 +5059,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else {
// Otherwise, nounwind call sites will never throw.
CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
+
+ if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
+ if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
+ CannotThrow = true;
}
// If we made a temporary, be sure to clean up after ourselves. Note that we
@@ -4857,7 +5079,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
getBundlesForFunclet(CalleePtr);
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
- if (FD->usesFPIntrin())
+ if (FD->hasAttr<StrictFPAttr>())
// All calls within a strictfp function are marked strictfp
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
@@ -5080,6 +5302,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
case ABIArgInfo::Expand:
+ case ABIArgInfo::IndirectAliased:
llvm_unreachable("Invalid ABI kind for return argument");
}
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 509ca43a9784..e3d9fec6d363 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -110,7 +110,8 @@ public:
/// Construct a callee. Call this constructor directly when this
/// isn't a direct call.
CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
- : KindOrFunctionPointer(SpecialKind(uintptr_t(functionPtr))) {
+ : KindOrFunctionPointer(
+ SpecialKind(reinterpret_cast<uintptr_t>(functionPtr))) {
AbstractInfo = abstractInfo;
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
@@ -186,7 +187,8 @@ public:
}
void setFunctionPointer(llvm::Value *functionPtr) {
assert(isOrdinary());
- KindOrFunctionPointer = SpecialKind(uintptr_t(functionPtr));
+ KindOrFunctionPointer =
+ SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
}
bool isVirtual() const {
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 4d143e3e1bdf..ba221dbbc83b 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -18,6 +18,7 @@
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
@@ -798,9 +799,8 @@ void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
size_t NumFields = 0;
for (const auto *Field : ClassDecl->fields()) {
const FieldDecl *D = Field;
- std::pair<CharUnits, CharUnits> FieldInfo =
- Context.getTypeInfoInChars(D->getType());
- CharUnits FieldSize = FieldInfo.first;
+ auto FieldInfo = Context.getTypeInfoInChars(D->getType());
+ CharUnits FieldSize = FieldInfo.Width;
assert(NumFields < SSV.size());
SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
NumFields++;
@@ -947,7 +947,7 @@ namespace {
LastField->isBitField()
? LastField->getBitWidthValue(Ctx)
: Ctx.toBits(
- Ctx.getTypeInfoDataSizeInChars(LastField->getType()).first);
+ Ctx.getTypeInfoDataSizeInChars(LastField->getType()).Width);
uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize -
FirstByteOffset + Ctx.getCharWidth() - 1;
CharUnits MemcpySize = Ctx.toCharUnitsFromBits(MemcpySizeBits);
@@ -1694,28 +1694,33 @@ namespace {
// Construct pointer to region to begin poisoning, and calculate poison
// size, so that only members declared in this class are poisoned.
ASTContext &Context = CGF.getContext();
- unsigned fieldIndex = 0;
- int startIndex = -1;
- // RecordDecl::field_iterator Field;
- for (const FieldDecl *Field : Dtor->getParent()->fields()) {
- // Poison field if it is trivial
- if (FieldHasTrivialDestructorBody(Context, Field)) {
- // Start sanitizing at this field
- if (startIndex < 0)
- startIndex = fieldIndex;
-
- // Currently on the last field, and it must be poisoned with the
- // current block.
- if (fieldIndex == Layout.getFieldCount() - 1) {
- PoisonMembers(CGF, startIndex, Layout.getFieldCount());
- }
- } else if (startIndex >= 0) {
- // No longer within a block of memory to poison, so poison the block
- PoisonMembers(CGF, startIndex, fieldIndex);
- // Re-set the start index
- startIndex = -1;
- }
- fieldIndex += 1;
+
+ const RecordDecl *Decl = Dtor->getParent();
+ auto Fields = Decl->fields();
+ auto IsTrivial = [&](const FieldDecl *F) {
+ return FieldHasTrivialDestructorBody(Context, F);
+ };
+
+ auto IsZeroSize = [&](const FieldDecl *F) {
+ return F->isZeroSize(Context);
+ };
+
+ // Poison blocks of fields with trivial destructors, making sure that the
+ // block begin and end do not point to zero-sized fields: such fields don't
+ // have correct offsets, so they can't be used to calculate the poisoning
+ // range.
+ for (auto It = Fields.begin(); It != Fields.end();) {
+ It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) {
+ return IsTrivial(F) && !IsZeroSize(F);
+ });
+ if (It == Fields.end())
+ break;
+ auto Start = It++;
+ It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) {
+ return !IsTrivial(F) && !IsZeroSize(F);
+ });
+
+ PoisonMembers(CGF, (*Start)->getFieldIndex(),
+ It == Fields.end() ? -1 : (*It)->getFieldIndex());
}
}
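A sketch (hypothetical struct) of how the two find_if calls above partition fields into poisonable blocks:

  #include <string>

  struct S {
    int a;          // trivial dtor, non-zero size: block starts at 'a'
    int b;          // trivial: block continues
    std::string c;  // non-trivial dtor: poison [a, c)
    int : 0;        // zero-sized, never used as a block boundary
    int d;          // trivial: final block, poison [d, end of object)
  };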
@@ -1725,37 +1730,35 @@ namespace {
/// \param layoutEndOffset index of the ASTRecordLayout field to
/// end poisoning (exclusive)
void PoisonMembers(CodeGenFunction &CGF, unsigned layoutStartOffset,
- unsigned layoutEndOffset) {
+ unsigned layoutEndOffset) {
ASTContext &Context = CGF.getContext();
const ASTRecordLayout &Layout =
Context.getASTRecordLayout(Dtor->getParent());
- llvm::ConstantInt *OffsetSizePtr = llvm::ConstantInt::get(
- CGF.SizeTy,
- Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset))
- .getQuantity());
+ // It's the first trivial field, so it should start on a char boundary;
+ // still, round the start offset up just in case.
+ CharUnits PoisonStart =
+ Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset) +
+ Context.getCharWidth() - 1);
+ llvm::ConstantInt *OffsetSizePtr =
+ llvm::ConstantInt::get(CGF.SizeTy, PoisonStart.getQuantity());
llvm::Value *OffsetPtr = CGF.Builder.CreateGEP(
CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy),
OffsetSizePtr);
- CharUnits::QuantityType PoisonSize;
+ CharUnits PoisonEnd;
if (layoutEndOffset >= Layout.getFieldCount()) {
- PoisonSize = Layout.getNonVirtualSize().getQuantity() -
- Context.toCharUnitsFromBits(
- Layout.getFieldOffset(layoutStartOffset))
- .getQuantity();
+ PoisonEnd = Layout.getNonVirtualSize();
} else {
- PoisonSize = Context.toCharUnitsFromBits(
- Layout.getFieldOffset(layoutEndOffset) -
- Layout.getFieldOffset(layoutStartOffset))
- .getQuantity();
+ PoisonEnd =
+ Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutEndOffset));
}
-
- if (PoisonSize == 0)
+ CharUnits PoisonSize = PoisonEnd - PoisonStart;
+ if (!PoisonSize.isPositive())
return;
- EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize);
+ EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize.getQuantity());
}
};
@@ -2509,12 +2512,16 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// Finally, store the address point. Use the same LLVM types as the field to
// support optimization.
+ unsigned GlobalsAS = CGM.getDataLayout().getDefaultGlobalsAddressSpace();
+ unsigned ProgAS = CGM.getDataLayout().getProgramAddressSpace();
llvm::Type *VTablePtrTy =
llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
- ->getPointerTo()
- ->getPointerTo();
- VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
- VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
+ ->getPointerTo(ProgAS)
+ ->getPointerTo(GlobalsAS);
+ VTableField = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ VTableField, VTablePtrTy->getPointerTo(GlobalsAS));
+ VTableAddressPoint = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ VTableAddressPoint, VTablePtrTy);
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrTy);
@@ -2796,7 +2803,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
}
if (CGM.getCodeGenOpts().SanitizeTrap.has(M)) {
- EmitTrapCheck(TypeTest);
+ EmitTrapCheck(TypeTest, SanitizerHandler::CFICheckFail);
return;
}
diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h
index ef4f6b9ec133..1b54c0018d27 100644
--- a/clang/lib/CodeGen/CGCleanup.h
+++ b/clang/lib/CodeGen/CGCleanup.h
@@ -612,6 +612,7 @@ struct EHPersonality {
static const EHPersonality MSVC_C_specific_handler;
static const EHPersonality MSVC_CxxFrameHandler3;
static const EHPersonality GNU_Wasm_CPlusPlus;
+ static const EHPersonality XL_CPlusPlus;
/// Does this personality use landingpads or the family of pad instructions
/// designed to form funclets?
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 6965c4a1209c..99944afaad14 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -115,8 +115,9 @@ void ApplyDebugLocation::init(SourceLocation TemporaryLocation,
// Construct a location that has a valid scope, but no line info.
assert(!DI->LexicalBlockStack.empty());
- CGF->Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
- 0, 0, DI->LexicalBlockStack.back(), DI->getInlinedAt()));
+ CGF->Builder.SetCurrentDebugLocation(
+ llvm::DILocation::get(DI->LexicalBlockStack.back()->getContext(), 0, 0,
+ DI->LexicalBlockStack.back(), DI->getInlinedAt()));
}
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E)
@@ -253,24 +254,12 @@ StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
FunctionTemplateSpecializationInfo *Info =
FD->getTemplateSpecializationInfo();
- // Emit the unqualified name in normal operation. LLVM and the debugger can
- // compute the fully qualified name from the scope chain. If we're only
- // emitting line table info, there won't be any scope chains, so emit the
- // fully qualified name here so that stack traces are more accurate.
- // FIXME: Do this when emitting DWARF as well as when emitting CodeView after
- // evaluating the size impact.
- bool UseQualifiedName = DebugKind == codegenoptions::DebugLineTablesOnly &&
- CGM.getCodeGenOpts().EmitCodeView;
-
- if (!Info && FII && !UseQualifiedName)
+ if (!Info && FII)
return FII->getName();
SmallString<128> NS;
llvm::raw_svector_ostream OS(NS);
- if (!UseQualifiedName)
- FD->printName(OS);
- else
- FD->printQualifiedName(OS, getPrintingPolicy());
+ FD->printName(OS);
// Add any template specialization args.
if (Info) {
@@ -374,9 +363,8 @@ CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
return None;
SourceManager &SM = CGM.getContext().getSourceManager();
- bool Invalid;
- const llvm::MemoryBuffer *MemBuffer = SM.getBuffer(FID, &Invalid);
- if (Invalid)
+ Optional<llvm::MemoryBufferRef> MemBuffer = SM.getBufferOrNone(FID);
+ if (!MemBuffer)
return None;
llvm::MD5 Hash;
@@ -404,17 +392,26 @@ Optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
}
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
- if (!Loc.isValid())
- // If Location is not valid then use main input file.
- return TheCU->getFile();
-
SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
-
- StringRef FileName = PLoc.getFilename();
- if (PLoc.isInvalid() || FileName.empty())
- // If the location is not valid then use main input file.
- return TheCU->getFile();
+ StringRef FileName;
+ FileID FID;
+
+ if (Loc.isInvalid()) {
+ // The DIFile used by the CU is distinct from the main source file. Call
+ // createFile() below for canonicalization if the source file was specified
+ // with an absolute path.
+ FileName = TheCU->getFile()->getFilename();
+ } else {
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ FileName = PLoc.getFilename();
+
+ if (FileName.empty()) {
+ FileName = TheCU->getFile()->getFilename();
+ } else {
+ FileName = PLoc.getFilename();
+ }
+ FID = PLoc.getFileID();
+ }
// Cache the results.
auto It = DIFileCache.find(FileName.data());
@@ -426,11 +423,7 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
SmallString<32> Checksum;
- // Compute the checksum if possible. If the location is affected by a #line
- // directive that refers to a file, PLoc will have an invalid FileID, and we
- // will correctly get no checksum.
- Optional<llvm::DIFile::ChecksumKind> CSKind =
- computeChecksum(PLoc.getFileID(), Checksum);
+ Optional<llvm::DIFile::ChecksumKind> CSKind = computeChecksum(FID, Checksum);
Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
@@ -488,11 +481,10 @@ std::string CGDebugInfo::remapDIPath(StringRef Path) const {
}
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
- if (Loc.isInvalid() && CurLoc.isInvalid())
+ if (Loc.isInvalid())
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.isValid() ? PLoc.getLine() : 0;
+ return SM.getPresumedLoc(Loc).getLine();
}
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
@@ -606,6 +598,7 @@ void CGDebugInfo::CreateCompileUnit() {
case codegenoptions::DebugInfoConstructor:
case codegenoptions::LimitedDebugInfo:
case codegenoptions::FullDebugInfo:
+ case codegenoptions::UnusedTypeInfo:
EmissionKind = llvm::DICompileUnit::FullDebug;
break;
}
@@ -719,22 +712,45 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::Id: \
return getOrCreateStructPtrType("opencl_" #ExtType, Id##Ty);
#include "clang/Basic/OpenCLExtensionTypes.def"
- // TODO: real support for SVE types requires more infrastructure
- // to be added first. The types have a variable length and are
- // represented in debug info as types whose length depends on a
- // target-specific pseudo register.
-#define SVE_TYPE(Name, Id, SingletonId) \
- case BuiltinType::Id:
+
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
- {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error,
- "cannot yet generate debug info for SVE type '%0'");
- auto Name = BT->getName(CGM.getContext().getPrintingPolicy());
- CGM.getDiags().Report(DiagID) << Name;
- // Return something safe.
+ {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ CGM.getContext().getBuiltinVectorTypeInfo(BT);
+ unsigned NumElemsPerVG = (Info.EC.getKnownMinValue() * Info.NumVectors) / 2;
+
+ // Debuggers can't extract a single bit from a vector, so svbool_t is
+ // displayed as a pattern of bytes (unsigned char) instead.
+ if (Info.ElementType == CGM.getContext().BoolTy) {
+ NumElemsPerVG /= 8;
+ Info.ElementType = CGM.getContext().UnsignedCharTy;
+ }
+
+ auto *LowerBound =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
+ SmallVector<int64_t, 9> Expr(
+ {llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
+ /* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
+ llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
+ auto *UpperBound = DBuilder.createExpression(Expr);
+
+ llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(
+ /*count*/ nullptr, LowerBound, UpperBound, /*stride*/ nullptr);
+ llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
+ llvm::DIType *ElemTy =
+ getOrCreateType(Info.ElementType, TheCU->getFile());
+ auto Align = getTypeAlignIfRequired(BT, CGM.getContext());
+ return DBuilder.createVectorType(/*Size*/ 0, Align, ElemTy,
+ SubscriptArray);
+ }
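A worked instance of the bound expression, with values assumed for svint32_t:

  // Info.EC.getKnownMinValue() == 4 and Info.NumVectors == 1, so
  // NumElemsPerVG == (4 * 1) / 2 == 2 elements per 64-bit granule. The
  // upper bound expression then reads
  //   DW_OP_constu 2; DW_OP_bregx 46 (VG); DW_OP_mul;
  //   DW_OP_constu 1; DW_OP_minus
  // i.e. 2 * VG - 1, with a constant lower bound of 0. For svbool_t the
  // count is divided by 8 and the element type becomes unsigned char.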
+ // It doesn't make sense to generate debug info for PowerPC MMA vector
+ // types, so return a safe type here to avoid generating an error.
+#define PPC_VECTOR_TYPE(Name, Id, size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
return CreateType(cast<const BuiltinType>(CGM.getContext().IntTy));
- }
case BuiltinType::UChar:
case BuiltinType::Char_U:
@@ -1008,12 +1024,17 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD)))
return cast<llvm::DICompositeType>(T);
llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
- unsigned Line = getLineNumber(RD->getLocation());
+ const unsigned Line =
+ getLineNumber(RD->getLocation().isValid() ? RD->getLocation() : CurLoc);
StringRef RDName = getClassName(RD);
uint64_t Size = 0;
uint32_t Align = 0;
+ const RecordDecl *D = RD->getDefinition();
+ if (D && D->isCompleteDefinition())
+ Size = CGM.getContext().getTypeSize(Ty);
+
llvm::DINode::DIFlags Flags = llvm::DINode::FlagFwdDecl;
// Add flag to nontrivial forward declarations. To be consistent with MSVC,
@@ -1025,7 +1046,10 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
Flags |= llvm::DINode::FlagNonTrivial;
// Create the type.
- SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+ SmallString<256> Identifier;
+ // Don't include a linkage name in line tables only.
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo())
+ Identifier = getTypeIdentifier(Ty, CGM, TheCU);
llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType(
getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align, Flags,
Identifier);
@@ -1325,7 +1349,7 @@ CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
// Get the location for the field.
llvm::DIFile *file = getOrCreateFile(loc);
- unsigned line = getLineNumber(loc);
+ const unsigned line = getLineNumber(loc.isValid() ? loc : CurLoc);
uint64_t SizeInBits = 0;
auto Align = AlignInBits;
@@ -1705,7 +1729,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
// info is emitted.
if (DebugKind == codegenoptions::DebugInfoConstructor)
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
- completeClass(CD->getParent());
+ completeUnusedClass(*CD->getParent());
llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
llvm::DISubprogram *SP = DBuilder.createMethod(
@@ -1893,6 +1917,12 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
} else if (const auto *GD = dyn_cast<MSGuidDecl>(D)) {
V = CGM.GetAddrOfMSGuidDecl(GD).getPointer();
+ } else if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) {
+ if (T->isRecordType())
+ V = ConstantEmitter(CGM).emitAbstract(
+ SourceLocation(), TPO->getValue(), TPO->getType());
+ else
+ V = CGM.GetAddrOfTemplateParamObject(TPO).getPointer();
}
assert(V && "Failed to find template parameter pointer");
V = V->stripPointerCasts();
@@ -2020,7 +2050,8 @@ StringRef CGDebugInfo::getDynamicInitializerName(const VarDecl *VD,
llvm::Function *InitFn) {
// If we're not emitting codeview, use the mangled name. For Itanium, this is
// arbitrary.
- if (!CGM.getCodeGenOpts().EmitCodeView)
+ if (!CGM.getCodeGenOpts().EmitCodeView ||
+ StubKind == DynamicInitKind::GlobalArrayDestructor)
return InitFn->getName();
// Print the normal qualified name for the variable, then break off the last
@@ -2045,6 +2076,7 @@ StringRef CGDebugInfo::getDynamicInitializerName(const VarDecl *VD,
switch (StubKind) {
case DynamicInitKind::NoStub:
+ case DynamicInitKind::GlobalArrayDestructor:
llvm_unreachable("not an initializer");
case DynamicInitKind::Initializer:
OS << "`dynamic initializer for '";
@@ -2068,8 +2100,7 @@ StringRef CGDebugInfo::getDynamicInitializerName(const VarDecl *VD,
}
void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
- SmallVectorImpl<llvm::Metadata *> &EltTys,
- llvm::DICompositeType *RecordTy) {
+ SmallVectorImpl<llvm::Metadata *> &EltTys) {
// If this class is not dynamic then there is not any vtable info to collect.
if (!RD->isDynamicClass())
return;
@@ -2259,6 +2290,23 @@ static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
return false;
}
+static bool canUseCtorHoming(const CXXRecordDecl *RD) {
+ // Constructor homing can be used for classes that cannot be constructed
+ // without emitting code for one of their constructors. That excludes
+ // classes with trivial or constexpr constructors and classes that can be
+ // created through aggregate initialization. Lambda objects are also skipped
+ // because they don't call constructors.
+
+ // Skip this optimization if the class or any of its methods are marked
+ // dllimport.
+ if (isClassOrMethodDLLImport(RD))
+ return false;
+
+ return !RD->isLambda() && !RD->isAggregate() &&
+ !RD->hasTrivialDefaultConstructor() &&
+ !RD->hasConstexprNonCopyMoveConstructor();
+}
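A hedged pair of examples for this predicate:

  struct Homed {
    Homed(); // user-provided, non-constexpr constructor
    int x;
  };
  // 'Homed' cannot be created without emitting a constructor, so its debug
  // info can be deferred until one is emitted.

  struct NotHomed { int x; };
  // Trivial default constructor and aggregate: objects can be created
  // without any constructor call, so debug info is emitted eagerly.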
+
static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
bool DebugTypeExtRefs, const RecordDecl *RD,
const LangOptions &LangOpts) {
@@ -2269,6 +2317,12 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
if (ES->hasExternalDefinitions(RD) == ExternalASTSource::EK_Always)
return true;
+ // In line-tables-only mode, emit only forward declarations to keep the
+ // debug info size small. This applies only to CodeView, since we don't emit
+ // types at all for DWARF in line-tables-only mode.
+ if (DebugKind == codegenoptions::DebugLineTablesOnly)
+ return true;
+
if (DebugKind > codegenoptions::LimitedDebugInfo)
return false;
@@ -2293,16 +2347,6 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
!isClassOrMethodDLLImport(CXXDecl))
return true;
- // In constructor debug mode, only emit debug info for a class when its
- // constructor is emitted. Skip this optimization if the class or any of
- // its methods are marked dllimport.
- if (DebugKind == codegenoptions::DebugInfoConstructor &&
- !CXXDecl->isLambda() && !CXXDecl->hasConstexprNonCopyMoveConstructor() &&
- !isClassOrMethodDLLImport(CXXDecl))
- for (const auto *Ctor : CXXDecl->ctors())
- if (Ctor->isUserProvided())
- return true;
-
TemplateSpecializationKind Spec = TSK_Undeclared;
if (const auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
Spec = SD->getSpecializationKind();
@@ -2312,6 +2356,12 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
CXXDecl->method_end()))
return true;
+ // In constructor homing mode, only emit complete debug info for a class
+ // when its constructor is emitted.
+ if ((DebugKind == codegenoptions::DebugInfoConstructor) &&
+ canUseCtorHoming(CXXDecl))
+ return true;
+
return false;
}
@@ -2350,7 +2400,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DICompositeType *FwdDecl = getOrCreateLimitedType(Ty, DefUnit);
+ llvm::DICompositeType *FwdDecl = getOrCreateLimitedType(Ty);
const RecordDecl *D = RD->getDefinition();
if (!D || !D->isCompleteDefinition())
@@ -2374,7 +2424,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
- CollectVTableInfo(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys);
}
// Collect data fields (including static variables and any initializers).
@@ -2537,12 +2587,11 @@ llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod,
// We use the lower 64 bits for debug info.
uint64_t Signature = 0;
- if (const auto &ModSig = Mod.getSignature()) {
- for (unsigned I = 0; I != sizeof(Signature); ++I)
- Signature |= (uint64_t)ModSig[I] << (I * 8);
- } else {
+ if (const auto &ModSig = Mod.getSignature())
+ Signature = ModSig.truncatedValue();
+ else
Signature = ~1ULL;
- }
+
llvm::DIBuilder DIB(CGM.getModule());
SmallString<0> PCM;
if (!llvm::sys::path::is_absolute(Mod.getASTFile()))
@@ -3079,10 +3128,7 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
}
llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) {
-
- // Unwrap the type as needed for debug information.
- Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
-
+ assert(Ty == UnwrapTypeForDebugInfo(Ty, CGM.getContext()));
auto It = TypeCache.find(Ty.getAsOpaquePtr());
if (It != TypeCache.end()) {
// Verify that the debug info still exists.
@@ -3095,8 +3141,6 @@ llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) {
void CGDebugInfo::completeTemplateDefinition(
const ClassTemplateSpecializationDecl &SD) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
- return;
completeUnusedClass(SD);
}
@@ -3252,15 +3296,14 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::TypeOf:
case Type::Decltype:
case Type::UnaryTransform:
- case Type::PackExpansion:
break;
}
llvm_unreachable("type should have been unwrapped!");
}
-llvm::DICompositeType *CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
- llvm::DIFile *Unit) {
+llvm::DICompositeType *
+CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty) {
QualType QTy(Ty, 0);
auto *T = cast_or_null<llvm::DICompositeType>(getTypeOrNull(QTy));
@@ -3289,9 +3332,14 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
// Get overall information about the record type for the debug info.
- llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
- unsigned Line = getLineNumber(RD->getLocation());
StringRef RDName = getClassName(RD);
+ const SourceLocation Loc = RD->getLocation();
+ llvm::DIFile *DefUnit = nullptr;
+ unsigned Line = 0;
+ if (Loc.isValid()) {
+ DefUnit = getOrCreateFile(Loc);
+ Line = getLineNumber(Loc);
+ }
llvm::DIScope *RDContext = getDeclContextDescriptor(RD);
@@ -3427,7 +3475,11 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
DebugKind <= codegenoptions::DebugLineTablesOnly))
LinkageName = StringRef();
- if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
+ // In line-tables-only mode (CodeView only), emit the function's scope so
+ // that functions with the same name can be distinguished.
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo() ||
+ (DebugKind == codegenoptions::DebugLineTablesOnly &&
+ CGM.getCodeGenOpts().EmitCodeView)) {
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNamespace(NSDecl);
@@ -3436,6 +3488,8 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
llvm::DIScope *Mod = getParentModuleOrNull(RDecl);
FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU);
}
+ }
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
// Check if it is a noreturn-marked function
if (FD->isNoReturn())
Flags |= llvm::DINode::FlagNoReturn;
@@ -3680,7 +3734,10 @@ llvm::DISubprogram *CGDebugInfo::getObjCMethodDeclaration(
llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
QualType FnType,
llvm::DIFile *F) {
- if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly)
+ // For CodeView, emit function types even in line-tables-only mode, because
+ // the only way to distinguish between functions there is by display name
+ // and type.
+ if (!D || (DebugKind <= codegenoptions::DebugLineTablesOnly &&
+ !CGM.getCodeGenOpts().EmitCodeView))
// Create fake but valid subroutine type. Otherwise -verify would fail, and
// subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields.
return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None));
@@ -3747,11 +3804,9 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
}
-void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
+void CGDebugInfo::emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
SourceLocation ScopeLoc, QualType FnType,
- llvm::Function *Fn, bool CurFuncIsThunk,
- CGBuilderTy &Builder) {
-
+ llvm::Function *Fn, bool CurFuncIsThunk) {
StringRef Name;
StringRef LinkageName;
@@ -3800,7 +3855,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (Name.startswith("\01"))
Name = Name.substr(1);
- if (!HasDecl || D->isImplicit() || D->hasAttr<ArtificialAttr>()) {
+ if (!HasDecl || D->isImplicit() || D->hasAttr<ArtificialAttr>() ||
+ (isa<VarDecl>(D) && GD.getDynamicInitKind() != DynamicInitKind::NoStub)) {
Flags |= llvm::DINode::FlagArtificial;
// Artificial functions should not silently reuse CurLoc.
CurLoc = SourceLocation();
@@ -3818,7 +3874,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
llvm::DISubprogram::DISPFlags SPFlagsForDef =
SPFlags | llvm::DISubprogram::SPFlagDefinition;
- unsigned LineNo = getLineNumber(Loc);
+ const unsigned LineNo = getLineNumber(Loc.isValid() ? Loc : CurLoc);
unsigned ScopeLine = getLineNumber(ScopeLoc);
llvm::DISubroutineType *DIFnType = getOrCreateFunctionType(D, FnType, Unit);
llvm::DISubprogram *Decl = nullptr;
@@ -3968,8 +4024,9 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
return;
llvm::MDNode *Scope = LexicalBlockStack.back();
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
- getLineNumber(CurLoc), getColumnNumber(CurLoc), Scope, CurInlinedAt));
+ Builder.SetCurrentDebugLocation(
+ llvm::DILocation::get(CGM.getLLVMContext(), getLineNumber(CurLoc),
+ getColumnNumber(CurLoc), Scope, CurInlinedAt));
}
void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
@@ -4000,9 +4057,9 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
setLocation(Loc);
// Emit a line table change for the current location inside the new scope.
- Builder.SetCurrentDebugLocation(
- llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc),
- LexicalBlockStack.back(), CurInlinedAt));
+ Builder.SetCurrentDebugLocation(llvm::DILocation::get(
+ CGM.getLLVMContext(), getLineNumber(Loc), getColumnNumber(Loc),
+ LexicalBlockStack.back(), CurInlinedAt));
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
@@ -4213,10 +4270,11 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
Flags | llvm::DINode::FlagArtificial, FieldAlign);
// Insert an llvm.dbg.declare into the current block.
- DBuilder.insertDeclare(
- Storage, D, DBuilder.createExpression(Expr),
- llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
- Builder.GetInsertBlock());
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
+ llvm::DILocation::get(CGM.getLLVMContext(), Line,
+ Column, Scope,
+ CurInlinedAt),
+ Builder.GetInsertBlock());
}
}
}
@@ -4241,7 +4299,8 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
- llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
+ llvm::DILocation::get(CGM.getLLVMContext(), Line,
+ Column, Scope, CurInlinedAt),
Builder.GetInsertBlock());
return D;
@@ -4277,7 +4336,8 @@ void CGDebugInfo::EmitLabel(const LabelDecl *D, CGBuilderTy &Builder) {
// Insert an llvm.dbg.label into the current block.
DBuilder.insertLabel(L,
- llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
+ llvm::DILocation::get(CGM.getLLVMContext(), Line, Column,
+ Scope, CurInlinedAt),
Builder.GetInsertBlock());
}
@@ -4317,7 +4377,8 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
Ty = CreateSelfType(VD->getType(), Ty);
// Get location information.
- unsigned Line = getLineNumber(VD->getLocation());
+ const unsigned Line =
+ getLineNumber(VD->getLocation().isValid() ? VD->getLocation() : CurLoc);
unsigned Column = getColumnNumber(VD->getLocation());
const llvm::DataLayout &target = CGM.getDataLayout();
@@ -4351,8 +4412,8 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
Line, Ty, false, llvm::DINode::FlagZero, Align);
// Insert an llvm.dbg.declare into the current block.
- auto DL =
- llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back(), CurInlinedAt);
+ auto DL = llvm::DILocation::get(CGM.getLLVMContext(), Line, Column,
+ LexicalBlockStack.back(), CurInlinedAt);
auto *Expr = DBuilder.createExpression(addr);
if (InsertPoint)
DBuilder.insertDeclare(Storage, D, Expr, DL, InsertPoint);
@@ -4537,7 +4598,8 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Alloca, debugVar, DBuilder.createExpression(),
- llvm::DebugLoc::get(line, column, scope, CurInlinedAt),
+ llvm::DILocation::get(CGM.getLLVMContext(), line,
+ column, scope, CurInlinedAt),
Builder.GetInsertBlock());
}
@@ -4695,13 +4757,10 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
}
}
- llvm::DIScope *DContext = nullptr;
-
// Do not emit separate definitions for function local consts.
if (isa<FunctionDecl>(VD->getDeclContext()))
return;
- // Emit definition for static members in CodeView.
VD = cast<ValueDecl>(VD->getCanonicalDecl());
auto *VarD = dyn_cast<VarDecl>(VD);
if (VarD && VarD->isStaticDataMember()) {
@@ -4714,15 +4773,9 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
RetainedTypes.push_back(
CGM.getContext().getRecordType(RD).getAsOpaquePtr());
- if (!CGM.getCodeGenOpts().EmitCodeView)
- return;
-
- // Use the global scope for static members.
- DContext = getContextDescriptor(
- cast<Decl>(CGM.getContext().getTranslationUnitDecl()), TheCU);
- } else {
- DContext = getDeclContextDescriptor(VD);
+ return;
}
+ llvm::DIScope *DContext = getDeclContextDescriptor(VD);
auto &GV = DeclCache[VD];
if (GV)
@@ -4785,6 +4838,8 @@ void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
if (!NSDecl->isAnonymousNamespace() ||
CGM.getCodeGenOpts().DebugExplicitImport) {
auto Loc = UD.getLocation();
+ if (!Loc.isValid())
+ Loc = CurLoc;
DBuilder.createImportedModule(
getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
getOrCreateNamespace(NSDecl), getOrCreateFile(Loc), getLineNumber(Loc));
@@ -4954,13 +5009,17 @@ void CGDebugInfo::finalize() {
DBuilder.finalize();
}
+// Retain the type named in an explicit cast even when it is only referenced
+// indirectly.
void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
- if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
- return;
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo())
+ if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile()))
+ DBuilder.retainType(DieTy);
+}
- if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile()))
- // Don't ignore in case of explicit cast where it is referenced indirectly.
- DBuilder.retainType(DieTy);
+void CGDebugInfo::EmitAndRetainType(QualType Ty) {
+ if (CGM.getCodeGenOpts().hasMaybeUnusedDebugInfo())
+ if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile()))
+ DBuilder.retainType(DieTy);
}
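A sketch of what this keeps alive, assuming -fno-eliminate-unused-debug-types is the mode backing hasMaybeUnusedDebugInfo():

  void f() {
    typedef int unused_t;     // never referenced again
    struct Unused { int x; }; // no object of this type is created
    // With unused-type debug info enabled, both types are still built via
    // getOrCreateType() and pinned in the output with retainType().
  }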
llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
@@ -4968,7 +5027,8 @@ llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
return llvm::DebugLoc();
llvm::MDNode *Scope = LexicalBlockStack.back();
- return llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc), Scope);
+ return llvm::DILocation::get(CGM.getLLVMContext(), getLineNumber(Loc),
+ getColumnNumber(Loc), Scope);
}
llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
index 96ef6c7c1d27..afd5b50c182a 100644
--- a/clang/lib/CodeGen/CGDebugInfo.h
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -324,8 +324,7 @@ class CGDebugInfo {
/// If the C++ class has vtable info then insert appropriate debug
/// info entry in EltTys vector.
void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile *F,
- SmallVectorImpl<llvm::Metadata *> &EltTys,
- llvm::DICompositeType *RecordTy);
+ SmallVectorImpl<llvm::Metadata *> &EltTys);
/// @}
/// Create a new lexical block node and push it on the stack.
@@ -413,10 +412,9 @@ public:
/// start of a new function.
/// \param Loc The location of the function header.
/// \param ScopeLoc The location of the function body.
- void EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
+ void emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
SourceLocation ScopeLoc, QualType FnType,
- llvm::Function *Fn, bool CurFnIsThunk,
- CGBuilderTy &Builder);
+ llvm::Function *Fn, bool CurFnIsThunk);
/// Start a new scope for an inlined function.
void EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD);
@@ -490,6 +488,9 @@ public:
/// Emit the type explicitly casted to.
void EmitExplicitCastType(QualType Ty);
+ /// Emit the type even if it might not be used.
+ void EmitAndRetainType(QualType Ty);
+
/// Emit C++ using declaration.
void EmitUsingDecl(const UsingDecl &UD);
@@ -600,8 +601,7 @@ private:
/// Get the type from the cache or create a new partial type if
/// necessary.
- llvm::DICompositeType *getOrCreateLimitedType(const RecordType *Ty,
- llvm::DIFile *F);
+ llvm::DICompositeType *getOrCreateLimitedType(const RecordType *Ty);
/// Create type metadata for a source language type.
llvm::DIType *CreateTypeNode(QualType Ty, llvm::DIFile *Fg);
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 1729c7ed3c31..a01638f0b67b 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -100,15 +100,24 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ObjCTypeParam:
case Decl::Binding:
llvm_unreachable("Declaration should not be in declstmts!");
- case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ if (CGDebugInfo *DI = getDebugInfo())
+ if (cast<RecordDecl>(D).getDefinition())
+ DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
+ return;
case Decl::Enum: // enum X;
+ if (CGDebugInfo *DI = getDebugInfo())
+ if (cast<EnumDecl>(D).getDefinition())
+ DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
+ return;
+ case Decl::Function: // void X();
case Decl::EnumConstant: // enum ? { X = ? }
- case Decl::CXXRecord: // struct/union/class X; [C++]
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
case Decl::Import:
case Decl::MSGuid: // __declspec(uuid("..."))
+ case Decl::TemplateParamObject:
case Decl::OMPThreadPrivate:
case Decl::OMPAllocate:
case Decl::OMPCapturedExpr:
@@ -157,12 +166,11 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Typedef: // typedef int X;
case Decl::TypeAlias: { // using X = int; [C++0x]
- const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
- QualType Ty = TD.getUnderlyingType();
-
+ QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitAndRetainType(Ty);
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
-
return;
}
}
@@ -345,12 +353,11 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
if (GV->getValueType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
- GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
- OldGV->isConstant(),
- OldGV->getLinkage(), Init, "",
- /*InsertBefore*/ OldGV,
- OldGV->getThreadLocalMode(),
- CGM.getContext().getTargetAddressSpace(D.getType()));
+ GV = new llvm::GlobalVariable(
+ CGM.getModule(), Init->getType(), OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
+ OldGV->getType()->getPointerAddressSpace());
GV->setVisibility(OldGV->getVisibility());
GV->setDSOLocal(OldGV->isDSOLocal());
GV->setComdat(OldGV->getComdat());
@@ -903,14 +910,17 @@ static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
llvm::Constant *Init, Address Loc,
- bool isVolatile, CGBuilderTy &Builder) {
+ bool isVolatile, CGBuilderTy &Builder,
+ bool IsAutoInit) {
assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
"called emitStoresForInitAfterBZero for zero or undef value.");
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
- Builder.CreateStore(Init, Loc, isVolatile);
+ auto *I = Builder.CreateStore(Init, Loc, isVolatile);
+ if (IsAutoInit)
+ I->addAnnotationMetadata("auto-init");
return;
}
@@ -923,7 +933,7 @@ static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
emitStoresForInitAfterBZero(
CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
- Builder);
+ Builder, IsAutoInit);
}
return;
}
@@ -938,7 +948,7 @@ static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
emitStoresForInitAfterBZero(CGM, Elt,
Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
- isVolatile, Builder);
+ isVolatile, Builder, IsAutoInit);
}
}
@@ -1146,7 +1156,7 @@ static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
Address Loc, bool isVolatile,
CGBuilderTy &Builder,
- llvm::Constant *constant) {
+ llvm::Constant *constant, bool IsAutoInit) {
auto *Ty = constant->getType();
uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
if (!ConstantSize)
@@ -1155,7 +1165,9 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
if (canDoSingleStore) {
- Builder.CreateStore(constant, Loc, isVolatile);
+ auto *I = Builder.CreateStore(constant, Loc, isVolatile);
+ if (IsAutoInit)
+ I->addAnnotationMetadata("auto-init");
return;
}
@@ -1164,14 +1176,17 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
// If the initializer is all or mostly the same, codegen with bzero / memset
// then do a few stores afterward.
if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0), SizeVal,
- isVolatile);
+ auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
+ SizeVal, isVolatile);
+ if (IsAutoInit)
+ I->addAnnotationMetadata("auto-init");
bool valueAlreadyCorrect =
constant->isNullValue() || isa<llvm::UndefValue>(constant);
if (!valueAlreadyCorrect) {
Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
- emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
+ emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
+ IsAutoInit);
}
return;
}
@@ -1186,8 +1201,10 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
assert(AP.getBitWidth() <= 8);
Value = AP.getLimitedValue();
}
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal,
- isVolatile);
+ auto *I = Builder.CreateMemSet(
+ Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
+ if (IsAutoInit)
+ I->addAnnotationMetadata("auto-init");
return;
}
@@ -1200,7 +1217,8 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
Address EltPtr = Builder.CreateStructGEP(Loc, i);
emitStoresForConstant(
CGM, D, EltPtr, isVolatile, Builder,
- cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
+ cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
+ IsAutoInit);
}
return;
}
@@ -1211,7 +1229,8 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
emitStoresForConstant(
CGM, D, EltPtr, isVolatile, Builder,
- cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
+ cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
+ IsAutoInit);
}
return;
}
@@ -1219,10 +1238,13 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
}
// Copy from a global.
- Builder.CreateMemCpy(Loc,
- createUnnamedGlobalForMemcpyFrom(
- CGM, D, Builder, constant, Loc.getAlignment()),
- SizeVal, isVolatile);
+ auto *I =
+ Builder.CreateMemCpy(Loc,
+ createUnnamedGlobalForMemcpyFrom(
+ CGM, D, Builder, constant, Loc.getAlignment()),
+ SizeVal, isVolatile);
+ if (IsAutoInit)
+ I->addAnnotationMetadata("auto-init");
}
static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
@@ -1231,7 +1253,8 @@ static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
llvm::Type *ElTy = Loc.getElementType();
llvm::Constant *constant =
constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
- emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
+ emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
+ /*IsAutoInit=*/true);
}
static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
@@ -1241,7 +1264,8 @@ static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
llvm::Constant *constant = constWithPadding(
CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
assert(!isa<llvm::UndefValue>(constant));
- emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
+ emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
+ /*IsAutoInit=*/true);
}
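A small sketch of the source pattern the "auto-init" annotations distinguish, assuming -ftrivial-auto-var-init=zero ('use' is a hypothetical helper):

  void use(int *, int);

  void f() {
    int buf[16]; // compiler-inserted zeroing: memset tagged "auto-init"
    int x = 42;  // user-written initializer: plain store, no annotation
    use(buf, x);
  }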
static bool containsUndef(llvm::Constant *constant) {
@@ -1710,14 +1734,16 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
case LangOptions::TrivialAutoVarInitKind::Uninitialized:
llvm_unreachable("Uninitialized handled by caller");
- case LangOptions::TrivialAutoVarInitKind::Zero:
+ case LangOptions::TrivialAutoVarInitKind::Zero: {
if (CGM.stopAutoInit())
return;
if (!EltSize.isOne())
SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
- isVolatile);
+ auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
+ SizeVal, isVolatile);
+ I->addAnnotationMetadata("auto-init");
break;
+ }
case LangOptions::TrivialAutoVarInitKind::Pattern: {
if (CGM.stopAutoInit())
@@ -1746,10 +1772,12 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
Cur->addIncoming(Begin.getPointer(), OriginBB);
CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
- Builder.CreateMemCpy(Address(Cur, CurAlign),
- createUnnamedGlobalForMemcpyFrom(
- CGM, D, Builder, Constant, ConstantAlign),
- BaseSizeInChars, isVolatile);
+ auto *I =
+ Builder.CreateMemCpy(Address(Cur, CurAlign),
+ createUnnamedGlobalForMemcpyFrom(
+ CGM, D, Builder, Constant, ConstantAlign),
+ BaseSizeInChars, isVolatile);
+ I->addAnnotationMetadata("auto-init");
llvm::Value *Next =
Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
@@ -1870,7 +1898,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
emitStoresForConstant(
CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
- type.isVolatileQualified(), Builder, constant);
+ type.isVolatileQualified(), Builder, constant, /*IsAutoInit=*/false);
}
/// Emit an expression as an initializer for an object (variable, field, etc.)
@@ -2088,21 +2116,47 @@ void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
-void CodeGenFunction::pushLifetimeExtendedDestroy(
- CleanupKind cleanupKind, Address addr, QualType type,
- Destroyer *destroyer, bool useEHCleanupForArray) {
- // Push an EH-only cleanup for the object now.
- // FIXME: When popping normal cleanups, we need to keep this EH cleanup
- // around in case a temporary's destructor throws an exception.
- if (cleanupKind & EHCleanup)
- EHStack.pushCleanup<DestroyObject>(
- static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
+void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
+ Address addr, QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ // If we're not in a conditional branch, we don't need to bother generating a
+ // conditional cleanup.
+ if (!isInConditionalBranch()) {
+ // Push an EH-only cleanup for the object now.
+ // FIXME: When popping normal cleanups, we need to keep this EH cleanup
+ // around in case a temporary's destructor throws an exception.
+ if (cleanupKind & EHCleanup)
+ EHStack.pushCleanup<DestroyObject>(
+ static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
+ destroyer, useEHCleanupForArray);
+
+ return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
+ cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray);
+ }
+
+ // Otherwise, we should only destroy the object if it's been initialized.
+ // Re-use the active flag and saved address across both the EH and end of
+ // scope cleanups.
+
+ using SavedType = typename DominatingValue<Address>::saved_type;
+ using ConditionalCleanupType =
+ EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
+ Destroyer *, bool>;
+
+ Address ActiveFlag = createCleanupActiveFlag();
+ SavedType SavedAddr = saveValueInCond(addr);
+
+ if (cleanupKind & EHCleanup) {
+ EHStack.pushCleanup<ConditionalCleanupType>(
+ static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type,
destroyer, useEHCleanupForArray);
+ initFullExprCleanupWithFlag(ActiveFlag);
+ }
- // Remember that we need to push a full cleanup for the object at the
- // end of the full-expression.
- pushCleanupAfterFullExpr<DestroyObject>(
- cleanupKind, addr, type, destroyer, useEHCleanupForArray);
+ pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
+ cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
+ useEHCleanupForArray);
}
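A source-level case the new conditional path handles, as a sketch (the helper use() is hypothetical): the lifetime-extended temporary is constructed on only one branch, so its end-of-scope destructor must be guarded by the active flag rather than run unconditionally.

  struct T { ~T(); };
  void use(const T &);
  void f(bool b, const T &fallback) {
    const T &r = b ? T{} : fallback; // T{} is built only when b is true
    use(r);
  } // the temporary's ~T() must run here iff b was true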
/// emitDestroy - Immediately perform the destruction of the given
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index 5a8500364295..3dbf4cc7cb97 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -21,7 +21,6 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"
-#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace clang;
using namespace CodeGen;
@@ -246,7 +245,10 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
- CGM.getContext().VoidTy, fn, FI, FunctionArgList());
+ CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
+ VD.getLocation(), VD.getInit()->getExprLoc());
+ // Emit an artificial location for this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
@@ -271,8 +273,10 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
// extern "C" int atexit(void (*f)(void));
- assert(cast<llvm::Function>(dtorStub)->getFunctionType() ==
- llvm::FunctionType::get(CGM.VoidTy, false) &&
+ assert(dtorStub->getType() ==
+ llvm::PointerType::get(
+ llvm::FunctionType::get(CGM.VoidTy, false),
+ dtorStub->getType()->getPointerAddressSpace()) &&
"Argument to atexit has a wrong type.");
llvm::FunctionType *atexitTy =
@@ -288,7 +292,7 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
}
llvm::Value *
-CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub) {
+CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
// The unatexit subroutine unregisters __dtor functions that were previously
// registered by the atexit subroutine. If the referenced function is found,
// it is removed from the list of functions that are called at normal program
@@ -296,8 +300,10 @@ CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub) {
// value is returned.
//
// extern "C" int unatexit(void (*f)(void));
- assert(dtorStub->getFunctionType() ==
- llvm::FunctionType::get(CGM.VoidTy, false) &&
+ assert(dtorStub->getType() ==
+ llvm::PointerType::get(
+ llvm::FunctionType::get(CGM.VoidTy, false),
+ dtorStub->getType()->getPointerAddressSpace()) &&
"Argument to unatexit has a wrong type.");
llvm::FunctionType *unatexitTy =
@@ -364,12 +370,9 @@ void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
- SourceLocation Loc, bool TLS, bool IsExternalLinkage) {
+ SourceLocation Loc, bool TLS) {
llvm::Function *Fn = llvm::Function::Create(
- FTy,
- IsExternalLinkage ? llvm::GlobalValue::ExternalLinkage
- : llvm::GlobalValue::InternalLinkage,
- Name, &getModule());
+ FTy, llvm::GlobalValue::InternalLinkage, Name, &getModule());
if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
@@ -377,8 +380,7 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
Fn->setSection(Section);
}
- if (Fn->hasInternalLinkage())
- SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
Fn->setCallingConv(getRuntimeCC());
@@ -425,22 +427,6 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
!isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
- auto RASignKind = getLangOpts().getSignReturnAddressScope();
- if (RASignKind != LangOptions::SignReturnAddressScopeKind::None) {
- Fn->addFnAttr("sign-return-address",
- RASignKind == LangOptions::SignReturnAddressScopeKind::All
- ? "all"
- : "non-leaf");
- auto RASignKey = getLangOpts().getSignReturnAddressKey();
- Fn->addFnAttr("sign-return-address-key",
- RASignKey == LangOptions::SignReturnAddressKeyKind::AKey
- ? "a_key"
- : "b_key");
- }
-
- if (getLangOpts().BranchTargetEnforcement)
- Fn->addFnAttr("branch-target-enforcement");
-
return Fn;
}
@@ -588,22 +574,10 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
- const bool UseSinitAndSterm = getCXXABI().useSinitAndSterm();
- if (UseSinitAndSterm) {
- GlobalUniqueModuleId = getUniqueModuleId(&getModule());
-
- // FIXME: We need to figure out what to hash on or encode into the unique ID
- // we need.
- if (GlobalUniqueModuleId.compare("") == 0)
- llvm::report_fatal_error(
- "cannot produce a unique identifier for this module"
- " based on strong external symbols");
- GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
- }
-
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
+ const bool UseSinitAndSterm = getCXXABI().useSinitAndSterm();
// Create our global prioritized initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
assert(!UseSinitAndSterm && "Prioritized sinit and sterm functions are not"
@@ -643,24 +617,12 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (UseSinitAndSterm && CXXGlobalInits.empty())
return;
- // Create our global initialization function.
- SmallString<128> FuncName;
- bool IsExternalLinkage = false;
- if (UseSinitAndSterm) {
- llvm::Twine("__sinit80000000_clang_", GlobalUniqueModuleId)
- .toVector(FuncName);
- IsExternalLinkage = true;
- } else {
- // Include the filename in the symbol name. Including "sub_" matches gcc
- // and makes sure these symbols appear lexicographically behind the symbols
- // with priority emitted above.
- llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule()))
- .toVector(FuncName);
- }
-
+ // Include the filename in the symbol name. Including "sub_" matches gcc
+ // and makes sure these symbols appear lexicographically behind the symbols
+ // with priority emitted above.
llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
- FTy, FuncName, FI, SourceLocation(), false /* TLS */,
- IsExternalLinkage);
+ FTy, llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
+ FI);
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
@@ -694,25 +656,8 @@ void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
// Create our global cleanup function.
- llvm::Function *Fn = nullptr;
- if (getCXXABI().useSinitAndSterm()) {
- if (GlobalUniqueModuleId.empty()) {
- GlobalUniqueModuleId = getUniqueModuleId(&getModule());
- // FIXME: We need to figure out what to hash on or encode into the unique
- // ID we need.
- if (GlobalUniqueModuleId.compare("") == 0)
- llvm::report_fatal_error(
- "cannot produce a unique identifier for this module"
- " based on strong external symbols");
- GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
- }
-
- Fn = CreateGlobalInitOrCleanUpFunction(
- FTy, llvm::Twine("__sterm80000000_clang_", GlobalUniqueModuleId), FI,
- SourceLocation(), false /* TLS */, true /* IsExternalLinkage */);
- } else {
- Fn = CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);
- }
+ llvm::Function *Fn =
+ CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);
CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
Fn, CXXGlobalDtorsOrStermFinalizers);
@@ -733,8 +678,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
- FunctionArgList(), D->getLocation(),
- D->getInit()->getExprLoc());
+ FunctionArgList());
+ // Emit an artificial location for this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
// Use guarded initialization if the global variable is weak. This
// occurs for, e.g., instantiated static data members and
@@ -868,7 +814,10 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
CurEHLocation = VD->getBeginLoc();
- StartFunction(VD, getContext().VoidTy, fn, FI, args);
+ StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
+ getContext().VoidTy, fn, FI, args);
+ // Emit an artificial location for this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
emitDestroy(addr, type, destroyer, useEHCleanupForArray);
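The same three-line pattern now appears in createAtExitStub, GenerateCXXGlobalVarDeclInitFunc, and generateDestroyHelper: start the function with real source locations, then immediately switch to an artificial location so the synthesized body is not attributed to user code while single-stepping. A sketch of the RAII behavior this relies on (inferred from the usage above, not a quote of the class):

  {
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);
    // ... instructions emitted here carry an artificial debug location ...
  } // the previous debug location is restored when AL is destroyed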
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index bdf70252b5ad..7a64963183bc 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -113,17 +113,19 @@ const EHPersonality
EHPersonality::MSVC_CxxFrameHandler3 = { "__CxxFrameHandler3", nullptr };
const EHPersonality
EHPersonality::GNU_Wasm_CPlusPlus = { "__gxx_wasm_personality_v0", nullptr };
+const EHPersonality EHPersonality::XL_CPlusPlus = {"__xlcxx_personality_v1",
+ nullptr};
static const EHPersonality &getCPersonality(const TargetInfo &Target,
const LangOptions &L) {
const llvm::Triple &T = Target.getTriple();
if (T.isWindowsMSVCEnvironment())
return EHPersonality::MSVC_CxxFrameHandler3;
- if (L.SjLjExceptions)
+ if (L.hasSjLjExceptions())
return EHPersonality::GNU_C_SJLJ;
- if (L.DWARFExceptions)
+ if (L.hasDWARFExceptions())
return EHPersonality::GNU_C;
- if (L.SEHExceptions)
+ if (L.hasSEHExceptions())
return EHPersonality::GNU_C_SEH;
return EHPersonality::GNU_C;
}
@@ -147,9 +149,9 @@ static const EHPersonality &getObjCPersonality(const TargetInfo &Target,
LLVM_FALLTHROUGH;
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
- if (L.SjLjExceptions)
+ if (L.hasSjLjExceptions())
return EHPersonality::GNU_ObjC_SJLJ;
- if (L.SEHExceptions)
+ if (L.hasSEHExceptions())
return EHPersonality::GNU_ObjC_SEH;
return EHPersonality::GNU_ObjC;
}
@@ -161,13 +163,15 @@ static const EHPersonality &getCXXPersonality(const TargetInfo &Target,
const llvm::Triple &T = Target.getTriple();
if (T.isWindowsMSVCEnvironment())
return EHPersonality::MSVC_CxxFrameHandler3;
- if (L.SjLjExceptions)
+ if (T.isOSAIX())
+ return EHPersonality::XL_CPlusPlus;
+ if (L.hasSjLjExceptions())
return EHPersonality::GNU_CPlusPlus_SJLJ;
- if (L.DWARFExceptions)
+ if (L.hasDWARFExceptions())
return EHPersonality::GNU_CPlusPlus;
- if (L.SEHExceptions)
+ if (L.hasSEHExceptions())
return EHPersonality::GNU_CPlusPlus_SEH;
- if (L.WasmExceptions)
+ if (L.hasWasmExceptions())
return EHPersonality::GNU_Wasm_CPlusPlus;
return EHPersonality::GNU_CPlusPlus;
}
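The L.SjLjExceptions-style booleans are replaced by predicates, which suggests LangOptions now stores a single exception-handling model rather than independent flags. A plausible shape for those accessors (an assumption, not the committed header):

  bool hasSjLjExceptions() const {
    return getExceptionHandling() == ExceptionHandlingKind::SjLj;
  }
  bool hasDWARFExceptions() const {
    return getExceptionHandling() == ExceptionHandlingKind::DwarfCFI;
  }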
@@ -472,7 +476,7 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
// In wasm we currently treat 'throw()' in the same way as 'noexcept'. In the
// case of a throw with types, we ignore it and print a warning for now.
// TODO Correctly handle exception specification in wasm
- if (CGM.getLangOpts().WasmExceptions) {
+ if (CGM.getLangOpts().hasWasmExceptions()) {
if (EST == EST_DynamicNone)
EHStack.pushTerminate();
else
@@ -560,7 +564,7 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
// In wasm we currently treat 'throw()' in the same way as 'noexcept'. In the
// case of a throw with types, we ignore it and print a warning for now.
// TODO Correctly handle exception specification in wasm
- if (CGM.getLangOpts().WasmExceptions) {
+ if (CGM.getLangOpts().hasWasmExceptions()) {
if (EST == EST_DynamicNone)
EHStack.popTerminate();
return;
@@ -1268,7 +1272,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
assert(RethrowBlock != WasmCatchStartBlock && RethrowBlock->empty());
Builder.SetInsertPoint(RethrowBlock);
llvm::Function *RethrowInCatchFn =
- CGM.getIntrinsic(llvm::Intrinsic::wasm_rethrow_in_catch);
+ CGM.getIntrinsic(llvm::Intrinsic::wasm_rethrow);
EmitNoreturnRuntimeCallOrInvoke(RethrowInCatchFn, {});
}
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 9e8770573d70..a3f90449bb4c 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -125,8 +125,13 @@ Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
}
void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
- assert(isa<llvm::AllocaInst>(Var.getPointer()));
- auto *Store = new llvm::StoreInst(Init, Var.getPointer(), /*volatile*/ false,
+ auto *Alloca = Var.getPointer();
+ assert(isa<llvm::AllocaInst>(Alloca) ||
+ (isa<llvm::AddrSpaceCastInst>(Alloca) &&
+ isa<llvm::AllocaInst>(
+ cast<llvm::AddrSpaceCastInst>(Alloca)->getPointerOperand())));
+
+ auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false,
Var.getAlignment().getAsAlign());
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
@@ -182,6 +187,7 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
QualType BoolTy = getContext().BoolTy;
SourceLocation Loc = E->getExprLoc();
+ CGFPOptionsRAII FPOptsRAII(*this, E);
if (!E->getType()->isAnyComplexType())
return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
@@ -1170,6 +1176,13 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
return Address(EmitScalarExpr(E), Align);
}
+llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
+ llvm::Value *V = RV.getScalarVal();
+ if (auto MPT = T->getAs<MemberPointerType>())
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
+ return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
+}
+
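The ABI hook matters because a null member pointer is not necessarily all zero bits; under the Itanium C++ ABI, a null data member pointer is represented as -1. A plain icmp against zero would therefore misclassify it, as the source-level sketch below illustrates:

  struct S { int x; };
  int S::*mp = nullptr;           // Itanium stores this as -1, not 0
  bool nonnull = (mp != nullptr); // must use the ABI's member-pointer check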
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
if (Ty->isVoidType())
return RValue::get(nullptr);
@@ -1509,6 +1522,29 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
if (result.HasSideEffects)
return ConstantEmission();
+  // In CUDA/HIP device compilation, a lambda may capture a reference variable
+  // referencing a global host variable by copy. In this case the lambda should
+  // make a copy of the value of the global host variable. The DRE of the
+  // captured reference variable cannot be emitted as a load from the host
+  // global variable as a compile-time constant, since the host variable is
+  // not accessible on the device. Instead, the DRE of the captured reference
+  // variable has to be loaded from the captures.
+ if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
+ refExpr->refersToEnclosingVariableOrCapture()) {
+ auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
+ if (MD && MD->getParent()->isLambda() &&
+ MD->getOverloadedOperator() == OO_Call) {
+ const APValue::LValueBase &base = result.Val.getLValueBase();
+ if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
+ if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
+ if (!VD->hasAttr<CUDADeviceAttr>()) {
+ return ConstantEmission();
+ }
+ }
+ }
+ }
+ }
+
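An illustrative CUDA source pattern for the case described above (names are hypothetical):

  int HostGlobal = 42;             // host-only variable
  void host_fn() {
    int &Ref = HostGlobal;
    auto L = [=] __device__ () {   // extended lambda captures Ref by copy
      return Ref;                  // must read the capture, not HostGlobal
    };
  }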
// Emit as a constant.
auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
result.Val, resultType);
@@ -1680,7 +1716,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
if (Ty->isVectorType()) {
const llvm::Type *EltTy = Addr.getElementType();
- const auto *VTy = cast<llvm::VectorType>(EltTy);
+ const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
// Handle vectors of size 3 like size 4 for better performance.
if (VTy->getNumElements() == 3) {
@@ -1692,8 +1728,8 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
// Shuffle vector to get vec3.
- V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
- ArrayRef<int>{0, 1, 2}, "extractVec");
+ V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2},
+ "extractVec");
return EmitFromMemory(V, Ty);
}
}
@@ -1765,8 +1801,9 @@ static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
auto *VectorTy = dyn_cast<llvm::VectorType>(
cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
if (VectorTy && !IsVector) {
- auto *ArrayTy = llvm::ArrayType::get(VectorTy->getElementType(),
- VectorTy->getNumElements());
+ auto *ArrayTy = llvm::ArrayType::get(
+ VectorTy->getElementType(),
+ cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
}
@@ -1797,10 +1834,9 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
llvm::Type *SrcTy = Value->getType();
auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
// Handle vec3 special.
- if (VecTy && VecTy->getNumElements() == 3) {
+ if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
- Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
- ArrayRef<int>{0, 1, 2, -1},
+ Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
"extractVec");
SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
}
@@ -1921,22 +1957,27 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
llvm::Type *ResLTy = ConvertType(LV.getType());
Address Ptr = LV.getBitFieldAddress();
- llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
-
+ llvm::Value *Val =
+ Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
+
+ bool UseVolatile = LV.isVolatileQualified() &&
+ Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+ const unsigned StorageSize =
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
if (Info.IsSigned) {
- assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
- unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
+ assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
+ unsigned HighBits = StorageSize - Offset - Info.Size;
if (HighBits)
Val = Builder.CreateShl(Val, HighBits, "bf.shl");
- if (Info.Offset + HighBits)
- Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
+ if (Offset + HighBits)
+ Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
} else {
- if (Info.Offset)
- Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
- if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
- Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
- Info.Size),
- "bf.clear");
+ if (Offset)
+ Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
+ if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
+ Val = Builder.CreateAnd(
+ Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
}
Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
EmitScalarRangeCheck(Val, LV.getType(), Loc);
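What the AAPCS path changes, at the source level: for volatile bit-fields the access width must be that of the declared container type, so Offset and StorageSize come from the volatile variants of the layout info. For example:

  struct S { volatile int bf : 8; };
  int get(S *s) { return s->bf; } // AAPCS: one 32-bit container load,
                                  // then shift/mask out the 8-bit field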
@@ -1967,8 +2008,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
for (unsigned i = 0; i != NumResultElts; ++i)
Mask.push_back(getAccessedFieldNo(i, Elts));
- Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
- Mask);
+ Vec = Builder.CreateShuffleVector(Vec, Mask);
return RValue::get(Vec);
}
@@ -2138,39 +2178,42 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
/*isSigned=*/false);
llvm::Value *MaskedVal = SrcVal;
+ const bool UseVolatile =
+ CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
+ Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+ const unsigned StorageSize =
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
// See if there are other bits in the bitfield's storage we'll need to load
// and mask together with source before storing.
- if (Info.StorageSize != Info.Size) {
- assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
+ if (StorageSize != Info.Size) {
+ assert(StorageSize > Info.Size && "Invalid bitfield size.");
llvm::Value *Val =
- Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
+ Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
// Mask the source value as needed.
if (!hasBooleanRepresentation(Dst.getType()))
- SrcVal = Builder.CreateAnd(SrcVal,
- llvm::APInt::getLowBitsSet(Info.StorageSize,
- Info.Size),
- "bf.value");
+ SrcVal = Builder.CreateAnd(
+ SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
+ "bf.value");
MaskedVal = SrcVal;
- if (Info.Offset)
- SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
+ if (Offset)
+ SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
// Mask out the original value.
- Val = Builder.CreateAnd(Val,
- ~llvm::APInt::getBitsSet(Info.StorageSize,
- Info.Offset,
- Info.Offset + Info.Size),
- "bf.clear");
+ Val = Builder.CreateAnd(
+ Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
+ "bf.clear");
// Or together the unchanged values and the source value.
SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
} else {
- assert(Info.Offset == 0);
+ assert(Offset == 0);
// According to the AAPCS:
// When a volatile bit-field is written, and its container does not overlap
- // with any non-bit-field member, its container must be read exactly once and
- // written exactly once using the access width appropriate to the type of the
- // container. The two accesses are not atomic.
+ // with any non-bit-field member, its container must be read exactly once
+ // and written exactly once using the access width appropriate to the type
+ // of the container. The two accesses are not atomic.
if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
Builder.CreateLoad(Ptr, true, "bf.load");
@@ -2185,8 +2228,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Sign extend the value if needed.
if (Info.IsSigned) {
- assert(Info.Size <= Info.StorageSize);
- unsigned HighBits = Info.StorageSize - Info.Size;
+ assert(Info.Size <= StorageSize);
+ unsigned HighBits = StorageSize - Info.Size;
if (HighBits) {
ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
@@ -2212,7 +2255,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
unsigned NumSrcElts = VTy->getNumElements();
unsigned NumDstElts =
- cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
if (NumDstElts == NumSrcElts) {
// Use a shuffle vector if the src and destination have the same number of
// elements, and restore the vector mask since it is on the side it will be
@@ -2221,8 +2264,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
for (unsigned i = 0; i != NumSrcElts; ++i)
Mask[getAccessedFieldNo(i, Elts)] = i;
- Vec = Builder.CreateShuffleVector(
- SrcVal, llvm::UndefValue::get(Vec->getType()), Mask);
+ Vec = Builder.CreateShuffleVector(SrcVal, Mask);
} else if (NumDstElts > NumSrcElts) {
// Extend the source vector to the same length and then shuffle it
// into the destination.
@@ -2232,8 +2274,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
for (unsigned i = 0; i != NumSrcElts; ++i)
ExtMask.push_back(i);
ExtMask.resize(NumDstElts, -1);
- llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(
- SrcVal, llvm::UndefValue::get(SrcVal->getType()), ExtMask);
+ llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
// build identity
SmallVector<int, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
@@ -2806,6 +2847,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
AlignmentSource::Decl);
+ if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND))
+ return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T,
+ AlignmentSource::Decl);
+
llvm_unreachable("Unhandled DeclRefExpr");
}
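A C++20 source form that reaches the new TemplateParamObjectDecl case (illustrative):

  struct Tag { int v; };
  template <Tag T> const int *get() { return &T.v; } // T names a template
                                                     // parameter object
  const int *p = get<Tag{42}>();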
@@ -2975,7 +3020,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
// Floating-point types which fit into intptr_t are bitcast to integers
// and then passed directly (after zero-extension, if necessary).
if (V->getType()->isFloatingPointTy()) {
- unsigned Bits = V->getType()->getPrimitiveSizeInBits();
+ unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize();
if (Bits <= TargetTy->getIntegerBitWidth())
V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
Bits));
@@ -3164,7 +3209,7 @@ void CodeGenFunction::EmitCheck(
}
if (TrapCond)
- EmitTrapCheck(TrapCond);
+ EmitTrapCheck(TrapCond, CheckHandler);
if (!FatalCond && !RecoverableCond)
return;
@@ -3359,7 +3404,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
// Data == nullptr means the calling module has trap behaviour for this check.
llvm::Value *DataIsNotNullPtr =
Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
- EmitTrapCheck(DataIsNotNullPtr);
+ EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
llvm::StructType *SourceLocationTy =
llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
@@ -3398,7 +3443,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
{Data, Addr, ValidVtable});
else
- EmitTrapCheck(Cond);
+ EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
}
FinishFunction();
@@ -3418,20 +3463,39 @@ void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
Builder.CreateUnreachable();
}
-void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
+void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
+ SanitizerHandler CheckHandlerID) {
llvm::BasicBlock *Cont = createBasicBlock("cont");
// If we're optimizing, collapse all calls to trap down to just one per
- // function to save on code size.
+ // check-type per function to save on code size.
+ if (TrapBBs.size() <= CheckHandlerID)
+ TrapBBs.resize(CheckHandlerID + 1);
+ llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
+
if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
TrapBB = createBasicBlock("trap");
Builder.CreateCondBr(Checked, Cont, TrapBB);
EmitBlock(TrapBB);
- llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
+
+ llvm::CallInst *TrapCall =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
+ llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
+
+ if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
+ auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
+ CGM.getCodeGenOpts().TrapFuncName);
+ TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
+ }
TrapCall->setDoesNotReturn();
TrapCall->setDoesNotThrow();
Builder.CreateUnreachable();
} else {
+ auto Call = TrapBB->begin();
+ assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
+
+ Call->applyMergedLocation(Call->getDebugLoc(),
+ Builder.getCurrentDebugLocation());
Builder.CreateCondBr(Checked, Cont, TrapBB);
}
@@ -3439,7 +3503,8 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
}
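The practical effect is one trap block per distinct check kind, each calling llvm.ubsantrap with the handler ID rather than plain llvm.trap, so merged traps stay distinguishable in crash dumps. Illustrative IR shape:

  trap:                                ; one block per CheckHandlerID
    call void @llvm.ubsantrap(i8 3)    ; immediate identifies the failed check
    unreachable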
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
- llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
+ llvm::CallInst *TrapCall =
+ Builder.CreateCall(CGM.getIntrinsic(IntrID));
if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
@@ -3793,7 +3858,7 @@ LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
llvm::Value *NumRows = Builder.getIntN(
RowIdx->getType()->getScalarSizeInBits(),
- E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
+ E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
llvm::Value *FinalIdx =
Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
return LValue::MakeMatrixElt(
@@ -3868,15 +3933,17 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
llvm::APSInt ConstLength;
if (Length) {
// Idx = LowerBound + Length - 1;
- if (Length->isIntegerConstantExpr(ConstLength, C)) {
- ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
+ if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
+ ConstLength = CL->zextOrTrunc(PointerWidthInBits);
Length = nullptr;
}
auto *LowerBound = E->getLowerBound();
llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
- if (LowerBound && LowerBound->isIntegerConstantExpr(ConstLowerBound, C)) {
- ConstLowerBound = ConstLowerBound.zextOrTrunc(PointerWidthInBits);
- LowerBound = nullptr;
+ if (LowerBound) {
+ if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) {
+ ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
+ LowerBound = nullptr;
+ }
}
if (!Length)
--ConstLength;
@@ -3913,8 +3980,10 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
: BaseTy;
if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
Length = VAT->getSizeExpr();
- if (Length->isIntegerConstantExpr(ConstLength, C))
+ if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
+ ConstLength = *L;
Length = nullptr;
+ }
} else {
auto *CAT = C.getAsConstantArrayType(ArrayTy);
ConstLength = CAT->getSize();
@@ -4194,32 +4263,45 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
if (field->isBitField()) {
const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(field->getParent());
+ CGM.getTypes().getCGRecordLayout(field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
+ const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
+ CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
+ Info.VolatileStorageSize != 0 &&
+ field->getType()
+ .withCVRQualifiers(base.getVRQualifiers())
+ .isVolatileQualified();
Address Addr = base.getAddress(*this);
unsigned Idx = RL.getLLVMFieldNo(field);
const RecordDecl *rec = field->getParent();
- if (!IsInPreservedAIRegion &&
- (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
- if (Idx != 0)
- // For structs, we GEP to the field that the record layout suggests.
- Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
- } else {
- llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
- getContext().getRecordType(rec), rec->getLocation());
- Addr = Builder.CreatePreserveStructAccessIndex(Addr, Idx,
- getDebugInfoFIndex(rec, field->getFieldIndex()),
- DbgInfo);
+ if (!UseVolatile) {
+ if (!IsInPreservedAIRegion &&
+ (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
+ if (Idx != 0)
+ // For structs, we GEP to the field that the record layout suggests.
+ Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
+ } else {
+ llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
+ getContext().getRecordType(rec), rec->getLocation());
+ Addr = Builder.CreatePreserveStructAccessIndex(
+ Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
+ DbgInfo);
+ }
}
-
+ const unsigned SS =
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
// Get the access type.
- llvm::Type *FieldIntTy =
- llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
+ llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
if (Addr.getElementType() != FieldIntTy)
Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
+ if (UseVolatile) {
+ const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
+ if (VolatileOffset)
+ Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
+ }
QualType fieldType =
- field->getType().withCVRQualifiers(base.getVRQualifiers());
+ field->getType().withCVRQualifiers(base.getVRQualifiers());
// TODO: Support TBAA for bit fields.
LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
@@ -4555,6 +4637,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index fb96d70732e8..60ea1b2af037 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -903,6 +903,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
@@ -1214,6 +1216,11 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Copy into the destination if the assignment isn't ignored.
EmitFinalDestCopy(E->getType(), LHS);
+
+ if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
+ E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
+ E->getType());
}
void AggExprEmitter::
@@ -1231,6 +1238,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// Save whether the destination's lifetime is externally managed.
bool isExternallyDestructed = Dest.isExternallyDestructed();
+ bool destructNonTrivialCStruct =
+ !isExternallyDestructed &&
+ E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
+ isExternallyDestructed |= destructNonTrivialCStruct;
+ Dest.setExternallyDestructed(isExternallyDestructed);
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
@@ -1252,6 +1264,10 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
Visit(E->getFalseExpr());
eval.end(CGF);
+ if (destructNonTrivialCStruct)
+ CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
+ E->getType());
+
CGF.EmitBlock(ContBlock);
}
@@ -1370,11 +1386,110 @@ void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
+/// Determine whether the given cast kind is known to always convert values
+/// with all zero bits in their value representation to values with all zero
+/// bits in their value representation.
+static bool castPreservesZero(const CastExpr *CE) {
+ switch (CE->getCastKind()) {
+ // No-ops.
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ case CK_BitCast:
+ case CK_ToUnion:
+ case CK_ToVoid:
+ // Conversions between (possibly-complex) integral, (possibly-complex)
+ // floating-point, and bool.
+ case CK_BooleanToSignedIntegral:
+ case CK_FloatingCast:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingToBoolean:
+ case CK_FloatingToIntegral:
+ case CK_IntegralCast:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ // Reinterpreting integers as pointers and vice versa.
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ // Language extensions.
+ case CK_VectorSplat:
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
+ return true;
+
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_NullToMemberPointer:
+ case CK_ReinterpretMemberPointer:
+ // FIXME: ABI-dependent.
+ return false;
+
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_IntToOCLSampler:
+ case CK_ZeroToOCLOpaqueType:
+ // FIXME: Check these.
+ return false;
+
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ case CK_FixedPointToFloating:
+ case CK_FixedPointToIntegral:
+ case CK_FloatingToFixedPoint:
+ case CK_IntegralToFixedPoint:
+ // FIXME: Do all fixed-point types represent zero as all 0 bits?
+ return false;
+
+ case CK_AddressSpaceConversion:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_Dynamic:
+ case CK_NullToPointer:
+ case CK_PointerToBoolean:
+ // FIXME: Preserves zeroes only if zero pointers and null pointers have the
+ // same representation in all involved address spaces.
+ return false;
+
+ case CK_ARCConsumeObject:
+ case CK_ARCExtendBlockObject:
+ case CK_ARCProduceObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_BuiltinFnToFnPtr:
+ case CK_Dependent:
+ case CK_LValueBitCast:
+ case CK_LValueToRValue:
+ case CK_LValueToRValueBitCast:
+ case CK_UncheckedDerivedToBase:
+ return false;
+ }
+ llvm_unreachable("Unhandled clang::CastKind enum");
+}
+
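A small example of what looking through zero-preserving casts enables (illustrative): the initializer below still counts as all-zero bytes, so the aggregate can be lowered to a memset instead of elementwise stores.

  struct P { long a; double b; };
  void f() {
    P z = { (long)'\0', (double)0 }; // IntegralCast and IntegralToFloating
                                     // both map zero to all-zero bits
  }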
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true. This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
E = E->IgnoreParens();
+ while (auto *CE = dyn_cast<CastExpr>(E)) {
+ if (!castPreservesZero(CE))
+ break;
+ E = CE->getSubExpr()->IgnoreParens();
+ }
// 0
if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
@@ -1757,7 +1872,9 @@ void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E)
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
- E = E->IgnoreParens();
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = MTE->getSubExpr();
+ E = E->IgnoreParenNoopCasts(CGF.getContext());
// 0 and 0.0 won't require any non-zero stores!
if (isSimpleZero(E, CGF)) return CharUnits::Zero();
@@ -1806,7 +1923,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
}
}
-
+ // FIXME: This overestimates the number of non-zero bytes for bit-fields.
CharUnits NumNonZeroBytes = CharUnits::Zero();
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
@@ -1974,28 +2091,28 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
// Get data size info for this aggregate. Don't copy the tail padding if this
// might be a potentially-overlapping subobject, since the tail padding might
// be occupied by a different object. Otherwise, copying it is fine.
- std::pair<CharUnits, CharUnits> TypeInfo;
+ TypeInfoChars TypeInfo;
if (MayOverlap)
TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
else
TypeInfo = getContext().getTypeInfoInChars(Ty);
llvm::Value *SizeVal = nullptr;
- if (TypeInfo.first.isZero()) {
+ if (TypeInfo.Width.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
getContext().getAsArrayType(Ty))) {
QualType BaseEltTy;
SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
- assert(!TypeInfo.first.isZero());
+ assert(!TypeInfo.Width.isZero());
SizeVal = Builder.CreateNUWMul(
SizeVal,
- llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
+ llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
}
}
if (!SizeVal) {
- SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
+ SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
}
// FIXME: If we have a volatile struct, the optimizer can remove what might
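TypeInfoChars replaces the old std::pair<CharUnits, CharUnits>, trading .first/.second for named fields. Its assumed shape, inferred from the usage above (the real struct may carry extra alignment bookkeeping):

  struct TypeInfoChars {
    CharUnits Width; // the data size used here as the memcpy amount
    CharUnits Align;
  };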
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index d59aa6ce0fb9..e1907a6f0680 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -220,7 +220,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
assert(DevirtualizedMethod);
const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
- const Expr *Inner = Base->ignoreParenBaseCasts();
+ const Expr *Inner = Base->IgnoreParenBaseCasts();
if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
MD->getReturnType().getCanonicalType())
// If the return types are not the same, this might be a case where more
@@ -1329,7 +1329,7 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
const CallExpr *TheCall,
bool IsDelete) {
CallArgList Args;
- EmitCallArgs(Args, Type->getParamTypes(), TheCall->arguments());
+ EmitCallArgs(Args, Type, TheCall->arguments());
// Find the allocation or deallocation function that we're calling.
ASTContext &Ctx = getContext();
DeclarationName Name = Ctx.DeclarationNames
@@ -1570,7 +1570,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
- CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
+ CharUnits allocAlign = getContext().getPreferredTypeAlignInChars(allocType);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
@@ -1788,11 +1788,14 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
// Pass the std::destroying_delete tag if present.
+ llvm::AllocaInst *DestroyingDeleteTag = nullptr;
if (Params.DestroyingDelete) {
QualType DDTag = *ParamTypeIt++;
- // Just pass an 'undef'. We expect the tag type to be an empty struct.
- auto *V = llvm::UndefValue::get(getTypes().ConvertType(DDTag));
- DeleteArgs.add(RValue::get(V), DDTag);
+ llvm::Type *Ty = getTypes().ConvertType(DDTag);
+ CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
+ DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
+ DestroyingDeleteTag->setAlignment(Align.getAsAlign());
+ DeleteArgs.add(RValue::getAggregate(Address(DestroyingDeleteTag, Align)), DDTag);
}
// Pass the size if the delete function has a size_t parameter.
@@ -1817,8 +1820,9 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
// Pass the alignment if the delete function has an align_val_t parameter.
if (Params.Alignment) {
QualType AlignValType = *ParamTypeIt++;
- CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
- getContext().getTypeAlignIfKnown(DeleteTy));
+ CharUnits DeleteTypeAlign =
+ getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
+ DeleteTy, true /* NeedsPreferredAlignment */));
llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
DeleteTypeAlign.getQuantity());
DeleteArgs.add(RValue::get(Align), AlignValType);
@@ -1829,6 +1833,11 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
// Emit the call to delete.
EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
+
+ // If call argument lowering didn't use the destroying_delete_t alloca,
+ // remove it again.
+ if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty())
+ DestroyingDeleteTag->eraseFromParent();
}
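A C++20 source form that exercises this path (illustrative):

  #include <new>
  struct S {
    ~S();
    void operator delete(S *p, std::destroying_delete_t);
  };
  void f(S *p) { delete p; } // passes the std::destroying_delete tag, now a
                             // real aligned temporary instead of undef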
namespace {
@@ -2191,7 +2200,8 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// polymorphic class type, the result refers to a std::type_info object
// representing the type of the most derived object (that is, the dynamic
// type) to which the glvalue refers.
- if (E->isPotentiallyEvaluated())
+  // If the operand is already the most derived object, there is no need to
+  // look up the vtable.
+ if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
return EmitTypeidFromVTable(*this, E->getExprOperand(),
StdTypeInfoPtrTy);
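Illustrative source for the new fast path: the operand's dynamic type is known statically, so no vtable load is required even though the type is polymorphic.

  #include <typeinfo>
  struct Base { virtual ~Base(); };
  struct Derived final : Base {};
  const std::type_info &f() {
    Derived d;
    return typeid(d); // d is the most derived object: resolved statically
  }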
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index a49817898ae3..6b6b901e0376 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -527,6 +527,8 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_ZeroToOCLOpaqueType:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
@@ -534,17 +536,21 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
- case CK_IntegralRealToComplex:
+ case CK_IntegralRealToComplex: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op);
return EmitScalarToComplexCast(CGF.EmitScalarExpr(Op), Op->getType(),
DestTy, Op->getExprLoc());
+ }
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralComplexCast:
- case CK_IntegralComplexToFloatingComplex:
+ case CK_IntegralComplexToFloatingComplex: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op);
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy,
Op->getExprLoc());
}
+ }
llvm_unreachable("unknown cast resulting in complex value");
}
@@ -898,6 +904,7 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
if (const AtomicType *AT = LHSTy->getAs<AtomicType>())
LHSTy = AT->getValueType();
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
BinOpInfo OpInfo;
// Load the RHS and LHS operands.
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index c6b2930faece..497f9c04c9f8 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -58,14 +58,14 @@ struct ConstantAggregateBuilderUtils {
}
llvm::Constant *getPadding(CharUnits PadSize) const {
- llvm::Type *Ty = CGM.Int8Ty;
+ llvm::Type *Ty = CGM.CharTy;
if (PadSize > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
return llvm::UndefValue::get(Ty);
}
llvm::Constant *getZeroes(CharUnits ZeroSize) const {
- llvm::Type *Ty = llvm::ArrayType::get(CGM.Int8Ty, ZeroSize.getQuantity());
+ llvm::Type *Ty = llvm::ArrayType::get(CGM.CharTy, ZeroSize.getQuantity());
return llvm::ConstantAggregateZero::get(Ty);
}
};
@@ -1069,7 +1069,7 @@ public:
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
- llvm::Type *Ty = CGM.Int8Ty;
+ llvm::Type *Ty = CGM.CharTy;
if (NumPadBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumPadBytes);
@@ -1163,6 +1163,8 @@ public:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
@@ -1620,8 +1622,8 @@ llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
if (CD->isTrivial() && CD->isDefaultConstructor())
return CGM.EmitNullConstant(D.getType());
}
- InConstantContext = true;
}
+ InConstantContext = D.hasConstantInitialization();
QualType destType = D.getType();
@@ -1877,6 +1879,10 @@ ConstantLValue
ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
// Handle values.
if (const ValueDecl *D = base.dyn_cast<const ValueDecl*>()) {
+ // The constant always points to the canonical declaration. We want to look
+ // at properties of the most recent declaration at the point of emission.
+ D = cast<ValueDecl>(D->getMostRecentDecl());
+
if (D->hasAttr<WeakRefAttr>())
return CGM.GetWeakRefReference(D).getPointer();
@@ -1899,6 +1905,9 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
if (auto *GD = dyn_cast<MSGuidDecl>(D))
return CGM.GetAddrOfMSGuidDecl(GD);
+ if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D))
+ return CGM.GetAddrOfTemplateParamObject(TPO);
+
return nullptr;
}
@@ -2108,8 +2117,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
case APValue::Union:
return ConstStructBuilder::BuildStruct(*this, Value, DestType);
case APValue::Array: {
- const ConstantArrayType *CAT =
- CGM.getContext().getAsConstantArrayType(DestType);
+ const ArrayType *ArrayTy = CGM.getContext().getAsArrayType(DestType);
unsigned NumElements = Value.getArraySize();
unsigned NumInitElts = Value.getArrayInitializedElts();
@@ -2117,7 +2125,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
llvm::Constant *Filler = nullptr;
if (Value.hasArrayFiller()) {
Filler = tryEmitAbstractForMemory(Value.getArrayFiller(),
- CAT->getElementType());
+ ArrayTy->getElementType());
if (!Filler)
return nullptr;
}
@@ -2132,7 +2140,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
llvm::Type *CommonElementType = nullptr;
for (unsigned I = 0; I < NumInitElts; ++I) {
llvm::Constant *C = tryEmitPrivateForMemory(
- Value.getArrayInitializedElt(I), CAT->getElementType());
+ Value.getArrayInitializedElt(I), ArrayTy->getElementType());
if (!C) return nullptr;
if (I == 0)
@@ -2142,16 +2150,6 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
Elts.push_back(C);
}
- // This means that the array type is probably "IncompleteType" or some
- // type that is not ConstantArray.
- if (CAT == nullptr && CommonElementType == nullptr && !NumInitElts) {
- const ArrayType *AT = CGM.getContext().getAsArrayType(DestType);
- CommonElementType = CGM.getTypes().ConvertType(AT->getElementType());
- llvm::ArrayType *AType = llvm::ArrayType::get(CommonElementType,
- NumElements);
- return llvm::ConstantAggregateZero::get(AType);
- }
-
llvm::ArrayType *Desired =
cast<llvm::ArrayType>(CGM.getTypes().ConvertType(DestType));
return EmitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 6131f97995dc..6f7e8263fa10 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -26,12 +26,13 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
-#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
@@ -356,10 +357,6 @@ public:
/// and an integer.
Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
SourceLocation Loc);
- Value *EmitFixedPointConversion(Value *Src, FixedPointSemantics &SrcFixedSema,
- FixedPointSemantics &DstFixedSema,
- SourceLocation Loc,
- bool DstIsInteger = false);
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
@@ -532,14 +529,7 @@ public:
if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
- Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
- llvm::Value *Args[] = {
- llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
- llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
- llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
- };
-
- return CGF.EmitBuiltinAvailable(Args);
+ return CGF.EmitBuiltinAvailable(Version);
}
Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
@@ -1222,13 +1212,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// padding is enabled because overflow into this bit is undefined
// behavior.
return Builder.CreateIsNotNull(Src, "tobool");
- if (DstType->isFixedPointType() || DstType->isIntegerType())
+ if (DstType->isFixedPointType() || DstType->isIntegerType() ||
+ DstType->isRealFloatingType())
return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
llvm_unreachable(
"Unhandled scalar conversion from a fixed point type to another type.");
} else if (DstType->isFixedPointType()) {
- if (SrcType->isIntegerType())
+ if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
// This also includes converting booleans and enums to fixed point types.
return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
@@ -1323,7 +1314,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
"Splatted expr doesn't match with vector element type?");
// Splat the element across to all elements
- unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
return Builder.CreateVectorSplat(NumElements, Src, "splat");
}
@@ -1444,92 +1435,28 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
QualType DstTy,
SourceLocation Loc) {
- FixedPointSemantics SrcFPSema =
- CGF.getContext().getFixedPointSemantics(SrcTy);
- FixedPointSemantics DstFPSema =
- CGF.getContext().getFixedPointSemantics(DstTy);
- return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
- DstTy->isIntegerType());
-}
-
-Value *ScalarExprEmitter::EmitFixedPointConversion(
- Value *Src, FixedPointSemantics &SrcFPSema, FixedPointSemantics &DstFPSema,
- SourceLocation Loc, bool DstIsInteger) {
- using llvm::APInt;
- using llvm::ConstantInt;
- using llvm::Value;
-
- unsigned SrcWidth = SrcFPSema.getWidth();
- unsigned DstWidth = DstFPSema.getWidth();
- unsigned SrcScale = SrcFPSema.getScale();
- unsigned DstScale = DstFPSema.getScale();
- bool SrcIsSigned = SrcFPSema.isSigned();
- bool DstIsSigned = DstFPSema.isSigned();
-
- llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
-
- Value *Result = Src;
- unsigned ResultWidth = SrcWidth;
-
- // Downscale.
- if (DstScale < SrcScale) {
- // When converting to integers, we round towards zero. For negative numbers,
- // right shifting rounds towards negative infinity. In this case, we can
- // just round up before shifting.
- if (DstIsInteger && SrcIsSigned) {
- Value *Zero = llvm::Constant::getNullValue(Result->getType());
- Value *IsNegative = Builder.CreateICmpSLT(Result, Zero);
- Value *LowBits = ConstantInt::get(
- CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
- Value *Rounded = Builder.CreateAdd(Result, LowBits);
- Result = Builder.CreateSelect(IsNegative, Rounded, Result);
- }
-
- Result = SrcIsSigned
- ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale")
- : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
- }
-
- if (!DstFPSema.isSaturated()) {
- // Resize.
- Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
-
- // Upscale.
- if (DstScale > SrcScale)
- Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
- } else {
- // Adjust the number of fractional bits.
- if (DstScale > SrcScale) {
- // Compare to DstWidth to prevent resizing twice.
- ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
- llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
- Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
- Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
- }
-
- // Handle saturation.
- bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
- if (LessIntBits) {
- Value *Max = ConstantInt::get(
- CGF.getLLVMContext(),
- APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
- Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
- : Builder.CreateICmpUGT(Result, Max);
- Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
- }
- // Cannot overflow min to dest type if src is unsigned since all fixed
- // point types can cover the unsigned min of 0.
- if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
- Value *Min = ConstantInt::get(
- CGF.getLLVMContext(),
- APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
- Value *TooLow = Builder.CreateICmpSLT(Result, Min);
- Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
- }
-
- // Resize the integer part to get the final destination size.
- if (ResultWidth != DstWidth)
- Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+ llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
+ llvm::Value *Result;
+ if (SrcTy->isRealFloatingType())
+ Result = FPBuilder.CreateFloatingToFixed(Src,
+ CGF.getContext().getFixedPointSemantics(DstTy));
+ else if (DstTy->isRealFloatingType())
+ Result = FPBuilder.CreateFixedToFloating(Src,
+ CGF.getContext().getFixedPointSemantics(SrcTy),
+ ConvertType(DstTy));
+ else {
+ auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
+ auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
+
+ if (DstTy->isIntegerType())
+ Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
+ DstFPSema.getWidth(),
+ DstFPSema.isSigned());
+ else if (SrcTy->isIntegerType())
+ Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
+ DstFPSema);
+ else
+ Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
}
return Result;
}
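// A hedged sketch, not part of the patch: the llvm::FixedPointBuilder API
// (llvm/IR/FixedPointBuilder.h, new in this LLVM import) that now owns the
// downscale/upscale/saturate logic deleted above. The semantics values below
// are illustrative assumptions, not taken from the surrounding code.
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/IRBuilder.h"

static llvm::Value *convertAccumToSatFract(llvm::IRBuilder<> &B,
                                           llvm::Value *Src) {
  // Assumed source type: short _Accum, 16 bits with 7 fractional bits, signed.
  llvm::FixedPointSemantics SrcSema(/*Width=*/16, /*Scale=*/7,
                                    /*IsSigned=*/true, /*IsSaturated=*/false,
                                    /*HasUnsignedPadding=*/false);
  // Assumed destination type: _Sat _Fract, 16 bits with 15 fractional bits.
  llvm::FixedPointSemantics DstSema(16, 15, true, /*IsSaturated=*/true, false);
  llvm::FixedPointBuilder<llvm::IRBuilder<>> FPB(B);
  return FPB.CreateFixedToFixed(Src, SrcSema, DstSema);
}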
@@ -1630,12 +1557,12 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
Value *Mask;
- llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
unsigned LHSElts = LTy->getNumElements();
Mask = RHS;
- llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
// Mask off the high bits of each shuffle index.
Value *MaskBits =
@@ -1840,7 +1767,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
return Visit(E->getInit(0));
}
- unsigned ResElts = VType->getNumElements();
+ unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
// Loop over initializers collecting the Value for each, and remembering
// whether the source was swizzle (ExtVectorElementExpr). This will allow
@@ -1864,7 +1791,8 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (isa<ExtVectorElementExpr>(IE)) {
llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
- if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+ if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
+ ->getNumElements() == ResElts) {
llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
Value *LHS = nullptr, *RHS = nullptr;
if (CurIdx == 0) {
@@ -1902,7 +1830,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
continue;
}
- unsigned InitElts = VVT->getNumElements();
+ unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
// If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
// input is the same width as the vector being constructed, generate an
@@ -1911,7 +1839,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (isa<ExtVectorElementExpr>(IE)) {
llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
Value *SVOp = SVI->getOperand(0);
- llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+ auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
if (OpTy->getNumElements() == ResElts) {
for (unsigned j = 0; j != CurIdx; ++j) {
@@ -1940,8 +1868,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
for (unsigned j = 0; j != InitElts; ++j)
Args.push_back(j);
Args.resize(ResElts, -1);
- Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT), Args,
- "vext");
+ Init = Builder.CreateShuffleVector(Init, Args, "vext");
Args.clear();
for (unsigned j = 0; j != CurIdx; ++j)
@@ -2079,6 +2006,66 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
}
+ // If Src is a fixed vector and Dst is a scalable vector, and both have the
+ // same element type, use the llvm.experimental.vector.insert intrinsic to
+ // perform the bitcast.
+ if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+ if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
+ if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
+ llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
+ return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
+ "castScalableSve");
+ }
+ }
+ }
+
+ // If Src is a scalable vector and Dst is a fixed vector, and both have the
+ // same element type, use the llvm.experimental.vector.extract intrinsic to
+ // perform the bitcast.
+ if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
+ if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
+ if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
+ return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
+ }
+ }
+ }
+
+ // Perform VLAT <-> VLST bitcast through memory.
+ // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
+ // require the element types of the vectors to be the same, we
+ // need to keep this around for casting between predicates, or more
+ // generally for bitcasts between VLAT <-> VLST where the element
+ // types of the vectors are not the same, until we figure out a better
+ // way of doing these casts.
+ if ((isa<llvm::FixedVectorType>(SrcTy) &&
+ isa<llvm::ScalableVectorType>(DstTy)) ||
+ (isa<llvm::ScalableVectorType>(SrcTy) &&
+ isa<llvm::FixedVectorType>(DstTy))) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ // A call with a scalar return is not an lvalue (unless the return type is
+ // a reference), so its address can't be emitted. Create a temp alloca to
+ // store the call result, bitcast the address, then load.
+ QualType RetTy = CE->getCallReturnType(CGF.getContext());
+ Address Addr =
+ CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
+ LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
+ CGF.EmitStoreOfScalar(Src, LV);
+ Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
+ "castFixedSve");
+ LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
+ DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
+ return EmitLoadOfLValue(DestLV, CE->getExprLoc());
+ }
+
+ Address Addr = EmitLValue(E).getAddress(CGF);
+ Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+ LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
+ DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
+ return EmitLoadOfLValue(DestLV, CE->getExprLoc());
+ }
+
return Builder.CreateBitCast(Src, DstTy);
}
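// A hedged sketch, not part of the patch: the IR shape of the fixed<->scalable
// cast paths added above. Casting a VLS <4 x i32> to an SVE
// <vscale x 4 x i32> becomes a call to @llvm.experimental.vector.insert with
// an undef wide vector at index 0; the reverse direction uses
// @llvm.experimental.vector.extract. A minimal IRBuilder equivalent, with
// assumed names:
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

static llvm::Value *castFixedToScalable(llvm::IRBuilder<> &B, llvm::Value *Src,
                                        llvm::ScalableVectorType *DstTy) {
  llvm::Value *Undef = llvm::UndefValue::get(DstTy);
  llvm::Value *Zero = llvm::ConstantInt::get(B.getInt64Ty(), 0);
  return B.CreateInsertVector(DstTy, Undef, Src, Zero, "cast.scalable");
}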
case CK_AddressSpaceConversion: {
@@ -2247,7 +2234,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
// Splat the element across to all elements
- unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
return Builder.CreateVectorSplat(NumElements, Elt, "splat");
}
@@ -2289,8 +2276,12 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingCast:
+ case CK_FixedPointToFloating:
+ case CK_FloatingToFixedPoint: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
return EmitScalarConversion(Visit(E), E->getType(), DestTy,
CE->getExprLoc());
+ }
case CK_BooleanToSignedIntegral: {
ScalarConversionOpts Opts;
Opts.TreatBooleanAsSigned = true;
@@ -2301,8 +2292,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return EmitIntToBoolConversion(Visit(E));
case CK_PointerToBoolean:
return EmitPointerToBoolConversion(Visit(E), E->getType());
- case CK_FloatingToBoolean:
+ case CK_FloatingToBoolean: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
return EmitFloatToBoolConversion(Visit(E));
+ }
case CK_MemberPointerToBoolean: {
llvm::Value *MemPtr = Visit(E);
const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
@@ -2599,6 +2592,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
} else if (type->isRealFloatingType()) {
// Add the inc/dec to the real part.
llvm::Value *amt;
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
// Another special case: half FP increment should be done via float
@@ -2667,14 +2661,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Now, convert from our invented integer literal to the type of the unary
// op. This will upscale and saturate if necessary. This value can become
// undef in some cases.
- FixedPointSemantics SrcSema =
- FixedPointSemantics::GetIntegerSemantics(value->getType()
- ->getScalarSizeInBits(),
- /*IsSigned=*/true);
- FixedPointSemantics DstSema =
- CGF.getContext().getFixedPointSemantics(Info.Ty);
- Info.RHS = EmitFixedPointConversion(Info.RHS, SrcSema, DstSema,
- E->getExprLoc());
+ llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
+ auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
+ Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
value = EmitFixedPointBinOp(Info);
// Objective-C pointer types.
@@ -3055,6 +3044,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
else
OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
SourceLocation Loc = E->getExprLoc();
OpInfo.LHS =
EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
@@ -3214,6 +3204,7 @@ Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
unsigned IID;
unsigned OpID = 0;
+ SanitizerHandler OverflowKind;
bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
switch (Ops.Opcode) {
@@ -3222,18 +3213,21 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
OpID = 1;
IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
llvm::Intrinsic::uadd_with_overflow;
+ OverflowKind = SanitizerHandler::AddOverflow;
break;
case BO_Sub:
case BO_SubAssign:
OpID = 2;
IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
llvm::Intrinsic::usub_with_overflow;
+ OverflowKind = SanitizerHandler::SubOverflow;
break;
case BO_Mul:
case BO_MulAssign:
OpID = 3;
IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
llvm::Intrinsic::umul_with_overflow;
+ OverflowKind = SanitizerHandler::MulOverflow;
break;
default:
llvm_unreachable("Unsupported operation for overflow detection");
@@ -3263,7 +3257,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
: SanitizerKind::UnsignedIntegerOverflow;
EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
} else
- CGF.EmitTrapCheck(Builder.CreateNot(overflow));
+ CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
return result;
}
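// A hedged sketch, not part of the patch: the {result, overflow-bit} pattern
// the function above wraps around the *.with.overflow intrinsics. The new
// SanitizerHandler value only selects which trap/UBSan message
// (AddOverflow/SubOverflow/MulOverflow) accompanies the check. Names below
// are assumptions.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"

static llvm::Value *emitCheckedAdd(llvm::IRBuilder<> &B, llvm::Value *L,
                                   llvm::Value *R, llvm::BasicBlock *TrapBB,
                                   llvm::BasicBlock *ContBB) {
  llvm::Value *Pair =
      B.CreateBinaryIntrinsic(llvm::Intrinsic::sadd_with_overflow, L, R);
  llvm::Value *Sum = B.CreateExtractValue(Pair, 0);
  llvm::Value *Overflow = B.CreateExtractValue(Pair, 1);
  B.CreateCondBr(Overflow, TrapBB, ContBB); // trap iff the bit is set
  return Sum; // consumed in ContBB
}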
@@ -3597,91 +3591,52 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
- // Convert the operands to the full precision type.
- Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
- op.E->getExprLoc());
- Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
- op.E->getExprLoc());
-
// Perform the actual operation.
Value *Result;
+ llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
switch (op.Opcode) {
case BO_AddAssign:
- case BO_Add: {
- if (CommonFixedSema.isSaturated()) {
- llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
- ? llvm::Intrinsic::sadd_sat
- : llvm::Intrinsic::uadd_sat;
- Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
- } else {
- Result = Builder.CreateAdd(FullLHS, FullRHS);
- }
+ case BO_Add:
+ Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
break;
- }
case BO_SubAssign:
- case BO_Sub: {
- if (CommonFixedSema.isSaturated()) {
- llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
- ? llvm::Intrinsic::ssub_sat
- : llvm::Intrinsic::usub_sat;
- Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
- } else {
- Result = Builder.CreateSub(FullLHS, FullRHS);
- }
+ case BO_Sub:
+ Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
break;
- }
case BO_MulAssign:
- case BO_Mul: {
- llvm::Intrinsic::ID IID;
- if (CommonFixedSema.isSaturated())
- IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix_sat
- : llvm::Intrinsic::umul_fix_sat;
- else
- IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix
- : llvm::Intrinsic::umul_fix;
- Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
- {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
+ case BO_Mul:
+ Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
break;
- }
case BO_DivAssign:
- case BO_Div: {
- llvm::Intrinsic::ID IID;
- if (CommonFixedSema.isSaturated())
- IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix_sat
- : llvm::Intrinsic::udiv_fix_sat;
- else
- IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix
- : llvm::Intrinsic::udiv_fix;
- Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
- {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
- break;
- }
+ case BO_Div:
+ Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
+ break;
+ case BO_ShlAssign:
+ case BO_Shl:
+ Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
+ break;
+ case BO_ShrAssign:
+ case BO_Shr:
+ Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
+ break;
case BO_LT:
- return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
- : Builder.CreateICmpULT(FullLHS, FullRHS);
+ return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
case BO_GT:
- return CommonFixedSema.isSigned() ? Builder.CreateICmpSGT(FullLHS, FullRHS)
- : Builder.CreateICmpUGT(FullLHS, FullRHS);
+ return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
case BO_LE:
- return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS)
- : Builder.CreateICmpULE(FullLHS, FullRHS);
+ return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
case BO_GE:
- return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS)
- : Builder.CreateICmpUGE(FullLHS, FullRHS);
+ return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
case BO_EQ:
// For equality operations, we assume any padding bits on unsigned types are
// zero'd out. They could be overwritten through non-saturating operations
// that cause overflow, but this leads to undefined behavior.
- return Builder.CreateICmpEQ(FullLHS, FullRHS);
+ return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
case BO_NE:
- return Builder.CreateICmpNE(FullLHS, FullRHS);
- case BO_Shl:
- case BO_Shr:
+ return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
case BO_Cmp:
case BO_LAnd:
case BO_LOr:
- case BO_ShlAssign:
- case BO_ShrAssign:
llvm_unreachable("Found unimplemented fixed point binary operation");
case BO_PtrMemD:
case BO_PtrMemI:
@@ -3698,9 +3653,12 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
llvm_unreachable("Found unsupported binary operation for fixed point types.");
}
+ bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
+ BinaryOperator::isShiftAssignOp(op.Opcode);
// Convert to the result type.
- return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
- op.E->getExprLoc());
+ return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
+ : CommonFixedSema,
+ ResultFixedSema);
}
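// A hedged note, not part of the patch, on why shifts bypass CommonFixedSema
// in the conversion above: FixedPointBuilder::CreateShl/CreateShr keep the
// LHS semantics (the RHS is a plain integer shift amount), so their result is
// converted from LHSFixedSema rather than from the common semantics used by
// the arithmetic cases. A minimal sketch with assumed names:
static llvm::Value *
emitFixedShl(llvm::FixedPointBuilder<llvm::IRBuilder<>> &FPB, llvm::Value *L,
             const llvm::FixedPointSemantics &LSema, llvm::Value *Amt,
             const llvm::FixedPointSemantics &ResSema) {
  llvm::Value *Shifted = FPB.CreateShl(L, LSema, Amt); // still in LSema
  return FPB.CreateFixedToFixed(Shifted, LSema, ResSema);
}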
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
@@ -3827,16 +3785,24 @@ Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
}
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // TODO: This bypasses the sanitizer check below.
+ if (Ops.isFixedPointOp())
+ return EmitFixedPointBinOp(Ops);
+
// LLVM requires the LHS and RHS to be the same type: promote or truncate the
// RHS to the same size as the LHS.
Value *RHS = Ops.RHS;
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
- Ops.Ty->hasSignedIntegerRepresentation() &&
- !CGF.getLangOpts().isSignedOverflowDefined() &&
- !CGF.getLangOpts().CPlusPlus20;
+ bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
+ Ops.Ty->hasSignedIntegerRepresentation() &&
+ !CGF.getLangOpts().isSignedOverflowDefined() &&
+ !CGF.getLangOpts().CPlusPlus20;
+ bool SanitizeUnsignedBase =
+ CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
+ Ops.Ty->hasUnsignedIntegerRepresentation();
+ bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
@@ -3869,11 +3835,12 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
/*NUW*/ true, /*NSW*/ true),
"shl.check");
- if (CGF.getLangOpts().CPlusPlus) {
+ if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
// In C99, we are not permitted to shift a 1 bit into the sign bit.
// Under C++11's rules, shifting a 1 bit into the sign bit is
// OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
// define signed left shifts, so we use the C99 and C++11 rules there).
+ // Unsigned shifts can always shift into the top bit.
llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
}
@@ -3883,7 +3850,9 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
BaseCheck->addIncoming(Builder.getTrue(), Orig);
BaseCheck->addIncoming(ValidBase, CheckShiftBase);
- Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
+ Checks.push_back(std::make_pair(
+ BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
+ : SanitizerKind::UnsignedShiftBase));
}
assert(!Checks.empty());
@@ -3894,6 +3863,10 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
}
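// A hedged example, not part of the patch, of what the new SanitizeUnsignedBase
// path diagnoses (flag name assumed to be -fsanitize=unsigned-shift-base):
// shifting set bits out of an unsigned value is flagged even though the
// language defines the result, while shifting a bit into the top bit stays
// allowed; that is what the extra LShr adjustment above preserves.
unsigned lostBits(unsigned x) {
  // Well-defined in C and C++, but any set bits among the top 28 bits of x
  // are silently discarded; the unsigned-shift-base check reports this.
  return x << 28;
}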
Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // TODO: This bypasses the sanitizer check below.
+ if (Ops.isFixedPointOp())
+ return EmitFixedPointBinOp(Ops);
+
// LLVM requires the LHS and RHS to be the same type: promote or truncate the
// RHS to the same size as the LHS.
Value *RHS = Ops.RHS;
@@ -3956,6 +3929,12 @@ static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
case BuiltinType::Double:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
+ case BuiltinType::UInt128:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
+ : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
+ case BuiltinType::Int128:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
+ : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
}
}
@@ -4223,6 +4202,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
}
+ bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
llvm::Type *ResTy = ConvertType(E->getType());
// If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
@@ -4233,6 +4213,22 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
CGF.incrementProfileCounter(E);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ // If we're generating for profiling or coverage, generate a branch to a
+ // block that increments the RHS counter needed to track branch condition
+ // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
+ // "FalseBlock" after the increment is done.
+ if (InstrumentRegions &&
+ CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
+ Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
+ CGF.EmitBlock(RHSBlockCnt);
+ CGF.incrementProfileCounter(E->getRHS());
+ CGF.EmitBranch(FBlock);
+ CGF.EmitBlock(FBlock);
+ }
+
// ZExt result to int or bool.
return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
}
@@ -4269,6 +4265,19 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
+ // If we're generating for profiling or coverage, generate a branch on the
+ // RHS to a block that increments the RHS true counter needed to track branch
+ // condition coverage.
+ if (InstrumentRegions &&
+ CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
+ Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
+ CGF.EmitBlock(RHSBlockCnt);
+ CGF.incrementProfileCounter(E->getRHS());
+ CGF.EmitBranch(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlockCnt);
+ }
+
// Emit an unconditional branch from this block to ContBlock.
{
// There is no need to emit line number for unconditional branch.
@@ -4309,6 +4318,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
}
+ bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
llvm::Type *ResTy = ConvertType(E->getType());
// If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
@@ -4319,6 +4329,22 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
CGF.incrementProfileCounter(E);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ // If we're generating for profiling or coverage, generate a branch to a
+ // block that increments the RHS counter needed to track branch condition
+ // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
+ // "FalseBlock" after the increment is done.
+ if (InstrumentRegions &&
+ CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
+ Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
+ CGF.EmitBlock(RHSBlockCnt);
+ CGF.incrementProfileCounter(E->getRHS());
+ CGF.EmitBranch(FBlock);
+ CGF.EmitBlock(FBlock);
+ }
+
// ZExt result to int or bool.
return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
}
@@ -4359,6 +4385,19 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
+ // If we're generating for profiling or coverage, generate a branch on the
+ // RHS to a block that increments the RHS true counter needed to track branch
+ // condition coverage.
+ if (InstrumentRegions &&
+ CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
+ Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
+ CGF.EmitBlock(RHSBlockCnt);
+ CGF.incrementProfileCounter(E->getRHS());
+ CGF.EmitBranch(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlockCnt);
+ }
+
// Emit an unconditional branch from this block to ContBlock. Insert an entry
// into the phi node for the edge with the value of RHSCond.
CGF.EmitBlock(ContBlock);
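// A hedged sketch, not part of the patch: the counter blocks the four hunks
// above add under Clang profile/coverage instrumentation. For `a && b` the
// CFG conceptually becomes (labels as in the code above):
//
//   entry:        br i1 %a, label %land.rhs, label %land.end
//   land.rhs:     %rhs = <evaluate b>
//                 br i1 %rhs, label %land.rhscnt, label %land.end
//   land.rhscnt:  <increment the profile counter for the RHS>
//                 br label %land.end
//   land.end:     %r = phi i1 [ false, %entry ], [ %rhs, ... ]
//
// The extra block only records how often the RHS took the non-short-circuit
// outcome, which is what branch-condition coverage needs; VisitBinLOr mirrors
// the same shape with lor.rhscnt, branching to the counter block on the
// opposite edge.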
@@ -4441,7 +4480,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *RHS = Visit(rhsExpr);
llvm::Type *condType = ConvertType(condExpr->getType());
- llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+ auto *vecTy = cast<llvm::FixedVectorType>(condType);
unsigned numElem = vecTy->getNumElements();
llvm::Type *elemType = vecTy->getElementType();
@@ -4587,9 +4626,8 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
Value *Src, unsigned NumElementsDst) {
- llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
static constexpr int Mask[] = {0, 1, 2, -1};
- return Builder.CreateShuffleVector(Src, UnV,
+ return Builder.CreateShuffleVector(Src,
llvm::makeArrayRef(Mask, NumElementsDst));
}
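// A hedged note, not part of the patch: this LLVM import added a
// single-operand CreateShuffleVector overload, so the explicit UndefValue
// second operand became redundant. An equivalent standalone form, with
// assumed names:
#include "llvm/IR/IRBuilder.h"

static llvm::Value *vext3to4(llvm::IRBuilder<> &B, llvm::Value *V3) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  // Previously: B.CreateShuffleVector(V3, llvm::UndefValue::get(V3->getType()),
  //             Mask, "vext"); the new overload supplies the undef operand.
  return B.CreateShuffleVector(V3, Mask, "vext");
}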
@@ -4644,10 +4682,14 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
llvm::Type *DstTy = ConvertType(E->getType());
llvm::Type *SrcTy = Src->getType();
- unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
- cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
- unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
- cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
+ unsigned NumElementsSrc =
+ isa<llvm::VectorType>(SrcTy)
+ ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
+ : 0;
+ unsigned NumElementsDst =
+ isa<llvm::VectorType>(DstTy)
+ ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
+ : 0;
// Going from vec3 to non-vec3 is a special case and requires a shuffle
// vector to get a vec4, then a bitcast if the target type is different.
diff --git a/clang/lib/CodeGen/CGLoopInfo.cpp b/clang/lib/CodeGen/CGLoopInfo.cpp
index 78da72eda0cf..8ba40599cfaf 100644
--- a/clang/lib/CodeGen/CGLoopInfo.cpp
+++ b/clang/lib/CodeGen/CGLoopInfo.cpp
@@ -24,8 +24,7 @@ MDNode *
LoopInfo::createLoopPropertiesMetadata(ArrayRef<Metadata *> LoopProperties) {
LLVMContext &Ctx = Header->getContext();
SmallVector<Metadata *, 4> NewLoopProperties;
- TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
- NewLoopProperties.push_back(TempNode.get());
+ NewLoopProperties.push_back(nullptr);
NewLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties);
@@ -58,8 +57,7 @@ MDNode *LoopInfo::createPipeliningMetadata(const LoopAttributes &Attrs,
}
SmallVector<Metadata *, 4> Args;
- TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
- Args.push_back(TempNode.get());
+ Args.push_back(nullptr);
Args.append(LoopProperties.begin(), LoopProperties.end());
if (Attrs.PipelineInitiationInterval > 0) {
@@ -113,8 +111,7 @@ LoopInfo::createPartialUnrollMetadata(const LoopAttributes &Attrs,
FollowupHasTransforms);
SmallVector<Metadata *, 4> Args;
- TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
- Args.push_back(TempNode.get());
+ Args.push_back(nullptr);
Args.append(LoopProperties.begin(), LoopProperties.end());
// Setting unroll.count
@@ -176,8 +173,7 @@ LoopInfo::createUnrollAndJamMetadata(const LoopAttributes &Attrs,
FollowupHasTransforms);
SmallVector<Metadata *, 4> Args;
- TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
- Args.push_back(TempNode.get());
+ Args.push_back(nullptr);
Args.append(LoopProperties.begin(), LoopProperties.end());
// Setting unroll_and_jam.count
@@ -221,7 +217,8 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
Enabled = false;
else if (Attrs.VectorizeEnable != LoopAttributes::Unspecified ||
Attrs.VectorizePredicateEnable != LoopAttributes::Unspecified ||
- Attrs.InterleaveCount != 0 || Attrs.VectorizeWidth != 0)
+ Attrs.InterleaveCount != 0 || Attrs.VectorizeWidth != 0 ||
+ Attrs.VectorizeScalable != LoopAttributes::Unspecified)
Enabled = true;
if (Enabled != true) {
@@ -250,8 +247,7 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
FollowupHasTransforms);
SmallVector<Metadata *, 4> Args;
- TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
- Args.push_back(TempNode.get());
+ Args.push_back(nullptr);
Args.append(LoopProperties.begin(), LoopProperties.end());
// Setting vectorize.predicate
@@ -276,6 +272,16 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
MDString::get(Ctx, "llvm.loop.vectorize.width"),
ConstantAsMetadata::get(ConstantInt::get(llvm::Type::getInt32Ty(Ctx),
Attrs.VectorizeWidth))};
+
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ if (Attrs.VectorizeScalable != LoopAttributes::Unspecified) {
+ bool IsScalable = Attrs.VectorizeScalable == LoopAttributes::Enable;
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.vectorize.scalable.enable"),
+ ConstantAsMetadata::get(
+ ConstantInt::get(llvm::Type::getInt1Ty(Ctx), IsScalable))};
Args.push_back(MDNode::get(Ctx, Vals));
}
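// A hedged sketch, not part of the patch: the metadata the new
// VectorizeScalable handling produces. A loop asking for scalable
// vectorization at width 4 conceptually carries:
//
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
//   !2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
//
// A standalone equivalent (assumed helper) of the builder pattern used in
// this file, mirroring the nullptr-placeholder/replaceOperandWith idiom:
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

static llvm::MDNode *makeScalableLoopID(llvm::LLVMContext &Ctx, unsigned W) {
  llvm::Metadata *Width[] = {
      llvm::MDString::get(Ctx, "llvm.loop.vectorize.width"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), W))};
  llvm::Metadata *Scalable[] = {
      llvm::MDString::get(Ctx, "llvm.loop.vectorize.scalable.enable"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt1Ty(Ctx), true))};
  llvm::Metadata *Ops[] = {nullptr, llvm::MDNode::get(Ctx, Width),
                           llvm::MDNode::get(Ctx, Scalable)};
  // Distinct, so identical attribute sets on different loops are not uniqued
  // into one node; operand 0 is the customary self-reference.
  llvm::MDNode *LoopID = llvm::MDNode::getDistinct(Ctx, Ops);
  LoopID->replaceOperandWith(0, LoopID);
  return LoopID;
}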
@@ -291,10 +297,16 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
// vectorize.enable is set if:
// 1) loop hint vectorize.enable is set, or
// 2) it is implied when vectorize.predicate is set, or
- // 3) it is implied when vectorize.width is set.
+ // 3) it is implied when vectorize.width is set to a value > 1, or
+ // 4) it is implied when vectorize.scalable.enable is true, or
+ // 5) it is implied when vectorize.width is unset (0) and the user
+ // explicitly requested fixed-width vectorization, i.e.
+ // vectorize.scalable.enable is false.
if (Attrs.VectorizeEnable != LoopAttributes::Unspecified ||
- IsVectorPredicateEnabled ||
- Attrs.VectorizeWidth > 1 ) {
+ IsVectorPredicateEnabled || Attrs.VectorizeWidth > 1 ||
+ Attrs.VectorizeScalable == LoopAttributes::Enable ||
+ (Attrs.VectorizeScalable == LoopAttributes::Disable &&
+ Attrs.VectorizeWidth != 1)) {
bool AttrVal = Attrs.VectorizeEnable != LoopAttributes::Disable;
Args.push_back(
MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
@@ -307,7 +319,7 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
Ctx,
{MDString::get(Ctx, "llvm.loop.vectorize.followup_all"), Followup}));
- MDNode *LoopID = MDNode::get(Ctx, Args);
+ MDNode *LoopID = MDNode::getDistinct(Ctx, Args);
LoopID->replaceOperandWith(0, LoopID);
HasUserTransforms = true;
return LoopID;
@@ -344,8 +356,7 @@ LoopInfo::createLoopDistributeMetadata(const LoopAttributes &Attrs,
createLoopVectorizeMetadata(Attrs, LoopProperties, FollowupHasTransforms);
SmallVector<Metadata *, 4> Args;
- TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
- Args.push_back(TempNode.get());
+ Args.push_back(nullptr);
Args.append(LoopProperties.begin(), LoopProperties.end());
Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.distribute.enable"),
@@ -359,7 +370,7 @@ LoopInfo::createLoopDistributeMetadata(const LoopAttributes &Attrs,
Ctx,
{MDString::get(Ctx, "llvm.loop.distribute.followup_all"), Followup}));
- MDNode *LoopID = MDNode::get(Ctx, Args);
+ MDNode *LoopID = MDNode::getDistinct(Ctx, Args);
LoopID->replaceOperandWith(0, LoopID);
HasUserTransforms = true;
return LoopID;
@@ -389,8 +400,7 @@ MDNode *LoopInfo::createFullUnrollMetadata(const LoopAttributes &Attrs,
}
SmallVector<Metadata *, 4> Args;
- TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
- Args.push_back(TempNode.get());
+ Args.push_back(nullptr);
Args.append(LoopProperties.begin(), LoopProperties.end());
Args.push_back(MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full")));
@@ -418,10 +428,14 @@ MDNode *LoopInfo::createMetadata(
LoopProperties.push_back(EndLoc.getAsMDNode());
}
+ LLVMContext &Ctx = Header->getContext();
+ if (Attrs.MustProgress)
+ LoopProperties.push_back(
+ MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.mustprogress")));
+
assert(!!AccGroup == Attrs.IsParallel &&
"There must be an access group iff the loop is parallel");
if (Attrs.IsParallel) {
- LLVMContext &Ctx = Header->getContext();
LoopProperties.push_back(MDNode::get(
Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccGroup}));
}
@@ -436,13 +450,15 @@ LoopAttributes::LoopAttributes(bool IsParallel)
UnrollEnable(LoopAttributes::Unspecified),
UnrollAndJamEnable(LoopAttributes::Unspecified),
VectorizePredicateEnable(LoopAttributes::Unspecified), VectorizeWidth(0),
- InterleaveCount(0), UnrollCount(0), UnrollAndJamCount(0),
+ VectorizeScalable(LoopAttributes::Unspecified), InterleaveCount(0),
+ UnrollCount(0), UnrollAndJamCount(0),
DistributeEnable(LoopAttributes::Unspecified), PipelineDisabled(false),
- PipelineInitiationInterval(0) {}
+ PipelineInitiationInterval(0), MustProgress(false) {}
void LoopAttributes::clear() {
IsParallel = false;
VectorizeWidth = 0;
+ VectorizeScalable = LoopAttributes::Unspecified;
InterleaveCount = 0;
UnrollCount = 0;
UnrollAndJamCount = 0;
@@ -453,6 +469,7 @@ void LoopAttributes::clear() {
DistributeEnable = LoopAttributes::Unspecified;
PipelineDisabled = false;
PipelineInitiationInterval = 0;
+ MustProgress = false;
}
LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
@@ -468,6 +485,7 @@ LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
}
if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
+ Attrs.VectorizeScalable == LoopAttributes::Unspecified &&
Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
Attrs.UnrollAndJamCount == 0 && !Attrs.PipelineDisabled &&
Attrs.PipelineInitiationInterval == 0 &&
@@ -476,7 +494,7 @@ LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
Attrs.UnrollEnable == LoopAttributes::Unspecified &&
Attrs.UnrollAndJamEnable == LoopAttributes::Unspecified &&
Attrs.DistributeEnable == LoopAttributes::Unspecified && !StartLoc &&
- !EndLoc)
+ !EndLoc && !Attrs.MustProgress)
return;
TempLoopID = MDNode::getTemporary(Header->getContext(), None);
@@ -503,6 +521,7 @@ void LoopInfo::finish() {
BeforeJam.IsParallel = AfterJam.IsParallel = Attrs.IsParallel;
BeforeJam.VectorizeWidth = Attrs.VectorizeWidth;
+ BeforeJam.VectorizeScalable = Attrs.VectorizeScalable;
BeforeJam.InterleaveCount = Attrs.InterleaveCount;
BeforeJam.VectorizeEnable = Attrs.VectorizeEnable;
BeforeJam.DistributeEnable = Attrs.DistributeEnable;
@@ -545,7 +564,8 @@ void LoopInfo::finish() {
SmallVector<Metadata *, 1> BeforeLoopProperties;
if (BeforeJam.VectorizeEnable != LoopAttributes::Unspecified ||
BeforeJam.VectorizePredicateEnable != LoopAttributes::Unspecified ||
- BeforeJam.InterleaveCount != 0 || BeforeJam.VectorizeWidth != 0)
+ BeforeJam.InterleaveCount != 0 || BeforeJam.VectorizeWidth != 0 ||
+ BeforeJam.VectorizeScalable == LoopAttributes::Enable)
BeforeLoopProperties.push_back(
MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.isvectorized")));
@@ -577,8 +597,7 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
const clang::CodeGenOptions &CGOpts,
ArrayRef<const clang::Attr *> Attrs,
const llvm::DebugLoc &StartLoc,
- const llvm::DebugLoc &EndLoc) {
-
+ const llvm::DebugLoc &EndLoc, bool MustProgress) {
// Identify loop hint attributes from Attrs.
for (const auto *Attr : Attrs) {
const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
@@ -623,6 +642,7 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
case LoopHintAttr::Vectorize:
// Disable vectorization by specifying a width of 1.
setVectorizeWidth(1);
+ setVectorizeScalable(LoopAttributes::Unspecified);
break;
case LoopHintAttr::Interleave:
      // Disable interleaving by specifying a count of 1.
@@ -724,11 +744,23 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
break;
}
break;
- case LoopHintAttr::Numeric:
+ case LoopHintAttr::FixedWidth:
+ case LoopHintAttr::ScalableWidth:
switch (Option) {
case LoopHintAttr::VectorizeWidth:
- setVectorizeWidth(ValueInt);
+ setVectorizeScalable(State == LoopHintAttr::ScalableWidth
+ ? LoopAttributes::Enable
+ : LoopAttributes::Disable);
+ if (LH->getValue())
+ setVectorizeWidth(ValueInt);
break;
+ default:
+ llvm_unreachable("Options cannot be used with 'scalable' hint.");
+ break;
+ }
+ break;
+ case LoopHintAttr::Numeric:
+ switch (Option) {
case LoopHintAttr::InterleaveCount:
setInterleaveCount(ValueInt);
break;
@@ -745,6 +777,7 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
case LoopHintAttr::UnrollAndJam:
case LoopHintAttr::VectorizePredicate:
case LoopHintAttr::Vectorize:
+ case LoopHintAttr::VectorizeWidth:
case LoopHintAttr::Interleave:
case LoopHintAttr::Distribute:
case LoopHintAttr::PipelineDisabled:
@@ -755,6 +788,8 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
}
}
+ setMustProgress(MustProgress);
+
if (CGOpts.OptimizationLevel > 0)
// Disable unrolling for the loop, if unrolling is disabled (via
// -fno-unroll-loops) and no pragmas override the decision.
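// A hedged example, not part of the patch: the source-level hint the new
// FixedWidth/ScalableWidth cases consume. The extended pragma takes an
// optional fixed/scalable keyword alongside the width:
void saxpy(float *x, float *y, float a, int n) {
#pragma clang loop vectorize_width(4, scalable)
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}
// "scalable" sets VectorizeScalable to Enable (a width, if given, still feeds
// llvm.loop.vectorize.width); "fixed" sets it to Disable, requesting classic
// fixed-width vectorization.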
diff --git a/clang/lib/CodeGen/CGLoopInfo.h b/clang/lib/CodeGen/CGLoopInfo.h
index e379c64c99a8..856e892f712e 100644
--- a/clang/lib/CodeGen/CGLoopInfo.h
+++ b/clang/lib/CodeGen/CGLoopInfo.h
@@ -58,6 +58,9 @@ struct LoopAttributes {
/// Value for llvm.loop.vectorize.width metadata.
unsigned VectorizeWidth;
+ /// Value for llvm.loop.vectorize.scalable.enable metadata.
+ LVEnableState VectorizeScalable;
+
/// Value for llvm.loop.interleave.count metadata.
unsigned InterleaveCount;
@@ -75,6 +78,9 @@ struct LoopAttributes {
/// Value for llvm.loop.pipeline.iicount metadata.
unsigned PipelineInitiationInterval;
+
+ /// Value for whether the loop is required to make progress.
+ bool MustProgress;
};
/// Information used when generating a structured loop.
@@ -205,7 +211,7 @@ public:
void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
const clang::CodeGenOptions &CGOpts,
llvm::ArrayRef<const Attr *> Attrs, const llvm::DebugLoc &StartLoc,
- const llvm::DebugLoc &EndLoc);
+ const llvm::DebugLoc &EndLoc, bool MustProgress = false);
/// End the current loop.
void pop();
@@ -255,6 +261,10 @@ public:
/// Set the vectorize width for the next loop pushed.
void setVectorizeWidth(unsigned W) { StagedAttrs.VectorizeWidth = W; }
+ void setVectorizeScalable(const LoopAttributes::LVEnableState &State) {
+ StagedAttrs.VectorizeScalable = State;
+ }
+
/// Set the interleave count for the next loop pushed.
void setInterleaveCount(unsigned C) { StagedAttrs.InterleaveCount = C; }
@@ -272,6 +282,9 @@ public:
StagedAttrs.PipelineInitiationInterval = C;
}
+ /// Set whether the next loop pushed must make progress.
+ void setMustProgress(bool P) { StagedAttrs.MustProgress = P; }
+
private:
/// Returns true if there is LoopInfo on the stack.
bool hasInfo() const { return !Active.empty(); }
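// A hedged example, not part of the patch, of what the new MustProgress flag
// conveys: C++ lets the compiler assume a loop without observable side
// effects terminates, and llvm.loop.mustprogress forwards that guarantee to
// LLVM so the optimizer may delete such loops outright.
int spin(int n) {
  // With mustprogress metadata this loop can be removed entirely: it does no
  // observable work and is assumed to terminate.
  for (int i = 0; i < n; ++i) { }
  return n;
}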
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index cd2b84f5dd20..3f930c76fe0a 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -23,6 +23,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
@@ -445,6 +446,75 @@ CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
Method);
}
+static void AppendFirstImpliedRuntimeProtocols(
+ const ObjCProtocolDecl *PD,
+ llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) {
+ if (!PD->isNonRuntimeProtocol()) {
+ const auto *Can = PD->getCanonicalDecl();
+ PDs.insert(Can);
+ return;
+ }
+
+ for (const auto *ParentPD : PD->protocols())
+ AppendFirstImpliedRuntimeProtocols(ParentPD, PDs);
+}
+
+std::vector<const ObjCProtocolDecl *>
+CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<const ObjCProtocolDecl *> RuntimePds;
+ llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs;
+
+ for (; begin != end; ++begin) {
+ const auto *It = *begin;
+ const auto *Can = It->getCanonicalDecl();
+ if (Can->isNonRuntimeProtocol())
+ NonRuntimePDs.insert(Can);
+ else
+ RuntimePds.push_back(Can);
+ }
+
+ // If there are no non-runtime protocols then we can just stop now.
+ if (NonRuntimePDs.empty())
+ return RuntimePds;
+
+ // Otherwise we have to search through the non-runtime protocols' inheritance
+ // hierarchy DAG, stopping whenever a branch either finds a runtime protocol
+ // or a non-runtime protocol without any parents. These are the "first-implied"
+ // protocols from a non-runtime protocol.
+ llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos;
+ for (const auto *PD : NonRuntimePDs)
+ AppendFirstImpliedRuntimeProtocols(PD, FirstImpliedProtos);
+
+ // Walk the Runtime list to get all protocols implied via the inclusion of
+ // this protocol, e.g. all protocols it inherits from including itself.
+ llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols;
+ for (const auto *PD : RuntimePds) {
+ const auto *Can = PD->getCanonicalDecl();
+ AllImpliedProtocols.insert(Can);
+ Can->getImpliedProtocols(AllImpliedProtocols);
+ }
+
+ // Similarly, walk the list of first-implied protocols to find the set of
+ // all protocols they imply, excluding the first-implied protocols themselves
+ // since they are not yet part of the `RuntimePds` list.
+ for (const auto *PD : FirstImpliedProtos) {
+ PD->getImpliedProtocols(AllImpliedProtocols);
+ }
+
+ // Finish building the final protocol list from the first-implied list,
+ // skipping any protocol that is already implied via some other inheritance
+ // path, since adding it again would be redundant.
+ for (const auto *PD : FirstImpliedProtos) {
+ if (!AllImpliedProtocols.contains(PD)) {
+ RuntimePds.push_back(PD);
+ }
+ }
+
+ return RuntimePds;
+}
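// A hedged walk-through, not part of the patch, of the computation above on a
// toy hierarchy. Suppose @protocol N is non-runtime (e.g. marked with the
// objc_non_runtime_protocol attribute), N inherits from runtime protocols Q
// and R, and a class adopts <N, R>:
//
//   - RuntimePds starts as {R}; N lands in NonRuntimePDs.
//   - AppendFirstImpliedRuntimeProtocols(N) descends into N's parents and
//     collects FirstImpliedProtos = {Q, R}.
//   - R is already in AllImpliedProtocols (it is on the runtime list), so
//     only Q is appended, giving a final list of {R, Q}.
//
// Metadata is thus emitted as if the class had adopted <R, Q> directly, with
// no trace of N left for the runtime.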
+
/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
@@ -850,8 +920,9 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// Evaluate the ivar's size and alignment.
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
QualType ivarType = ivar->getType();
- std::tie(IvarSize, IvarAlignment) =
- CGM.getContext().getTypeInfoInChars(ivarType);
+ auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType);
+ IvarSize = TInfo.Width;
+ IvarAlignment = TInfo.Align;
// If we have a copy property, we always have to use getProperty/setProperty.
// TODO: we could actually use setProperty and an expression for non-atomics.
@@ -1449,9 +1520,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
ValueDecl *selfDecl = setterMethod->getSelfDecl();
DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
VK_LValue, SourceLocation());
- ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
- selfDecl->getType(), CK_LValueToRValue, &self,
- VK_RValue);
+ ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(),
+ CK_LValueToRValue, &self, VK_RValue,
+ FPOptionsOverride());
ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
SourceLocation(), SourceLocation(),
&selfLoad, true, true);
@@ -1462,7 +1533,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
SourceLocation());
ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
argType.getUnqualifiedType(), CK_LValueToRValue,
- &arg, VK_RValue);
+ &arg, VK_RValue, FPOptionsOverride());
// The property type can differ from the ivar type in some situations with
// Objective-C pointer types, we can always bit cast the RHS in these cases.
@@ -1483,9 +1554,8 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
} else if (ivarRef.getType()->isPointerType()) {
argCK = CK_BitCast;
}
- ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
- ivarRef.getType(), argCK, &argLoad,
- VK_RValue);
+ ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK,
+ &argLoad, VK_RValue, FPOptionsOverride());
Expr *finalArg = &argLoad;
if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
argLoad.getType()))
@@ -2151,6 +2221,12 @@ static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
// Call the function.
llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value);
+ // Mark calls to objc_autorelease as tail on the assumption that methods
+ // overriding autorelease do not touch anything on the stack.
+ if (fnName == "objc_autorelease")
+ if (auto *Call = dyn_cast<llvm::CallInst>(Inst))
+ Call->setTailCall();
+
// Cast the result back to the original type.
return CGF.Builder.CreateBitCast(Inst, origType);
}
@@ -2250,8 +2326,7 @@ llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
llvm::CallInst::TailCallKind tailKind =
- CGM.getTargetCodeGenInfo()
- .shouldSuppressTailCallsOfRetainAutoreleasedReturnValue()
+ CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
? llvm::CallInst::TCK_NoTail
: llvm::CallInst::TCK_None;
return emitARCValueOperation(
@@ -2270,9 +2345,14 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
- return emitARCValueOperation(*this, value, nullptr,
- CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
- llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
+ llvm::CallInst::TailCallKind tailKind =
+ CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
+ ? llvm::CallInst::TCK_NoTail
+ : llvm::CallInst::TCK_None;
+ return emitARCValueOperation(
+ *this, value, nullptr,
+ CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
+ llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue, tailKind);
}
/// Release the given object.
@@ -2814,45 +2894,57 @@ typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
ValueTransform;
/// Insert code immediately after a call.
+
+// FIXME: We should find a way to emit the runtime call immediately
+// after the call is emitted to eliminate the need for this function.
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
llvm::Value *value,
ValueTransform doAfterCall,
ValueTransform doFallback) {
- if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
- CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
// Place the retain immediately following the call.
CGF.Builder.SetInsertPoint(call->getParent(),
++llvm::BasicBlock::iterator(call));
value = doAfterCall(CGF, value);
-
- CGF.Builder.restoreIP(ip);
- return value;
} else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
- CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
-
// Place the retain at the beginning of the normal destination block.
llvm::BasicBlock *BB = invoke->getNormalDest();
CGF.Builder.SetInsertPoint(BB, BB->begin());
value = doAfterCall(CGF, value);
- CGF.Builder.restoreIP(ip);
- return value;
-
// Bitcasts can arise because of related-result returns. Rewrite
// the operand.
} else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
+ // Change the insert point to avoid emitting the fall-back call after the
+ // bitcast.
+ CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator());
llvm::Value *operand = bitcast->getOperand(0);
operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
bitcast->setOperand(0, operand);
- return bitcast;
-
- // Generic fall-back case.
+ value = bitcast;
} else {
- // Retain using the non-block variant: we never need to do a copy
- // of a block that's been returned to us.
- return doFallback(CGF, value);
+ auto *phi = dyn_cast<llvm::PHINode>(value);
+ if (phi && phi->getNumIncomingValues() == 2 &&
+ isa<llvm::ConstantPointerNull>(phi->getIncomingValue(1)) &&
+ isa<llvm::CallBase>(phi->getIncomingValue(0))) {
+ // Handle phi instructions that are generated when it's necessary to check
+ // whether the receiver of a message is null.
+ llvm::Value *inVal = phi->getIncomingValue(0);
+ inVal = emitARCOperationAfterCall(CGF, inVal, doAfterCall, doFallback);
+ phi->setIncomingValue(0, inVal);
+ value = phi;
+ } else {
+ // Generic fall-back case.
+ // Retain using the non-block variant: we never need to do a copy
+ // of a block that's been returned to us.
+ value = doFallback(CGF, value);
+ }
}
+
+ CGF.Builder.restoreIP(ip);
+ return value;
}
/// Given that the given expression is some sort of call (which does
@@ -3741,9 +3833,61 @@ CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
return Val;
}
+static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) {
+ switch (TT.getOS()) {
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ return llvm::MachO::PLATFORM_MACOS;
+ case llvm::Triple::IOS:
+ return llvm::MachO::PLATFORM_IOS;
+ case llvm::Triple::TvOS:
+ return llvm::MachO::PLATFORM_TVOS;
+ case llvm::Triple::WatchOS:
+ return llvm::MachO::PLATFORM_WATCHOS;
+ default:
+ return /*Unknown platform*/ 0;
+ }
+}
+
+static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
+ const VersionTuple &Version) {
+ CodeGenModule &CGM = CGF.CGM;
+ // Note: we intend to support multi-platform version checks, so reserve
+ // room for a dual-platform checking invocation to be implemented in the
+ // future.
+ llvm::SmallVector<llvm::Value *, 8> Args;
+
+ auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) {
+ Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
+ Args.push_back(
+ llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0));
+ };
+
+ assert(!Version.empty() && "unexpected empty version");
+ EmitArgs(Version, CGM.getTarget().getTriple());
+
+ if (!CGM.IsPlatformVersionAtLeastFn) {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(
+ CGM.Int32Ty, {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty},
+ false);
+ CGM.IsPlatformVersionAtLeastFn =
+ CGM.CreateRuntimeFunction(FTy, "__isPlatformVersionAtLeast");
+ }
+
+ llvm::Value *Check =
+ CGF.EmitNounwindRuntimeCall(CGM.IsPlatformVersionAtLeastFn, Args);
+ return CGF.Builder.CreateICmpNE(Check,
+ llvm::Constant::getNullValue(CGM.Int32Ty));
+}
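// A hedged sketch, not part of the patch: the call shape emitted above. For
// `if (@available(iOS 13, *))` targeting iOS, the lowering is conceptually
// (platform IDs from llvm/BinaryFormat/MachO.h, where PLATFORM_IOS == 2):
extern "C" int __isPlatformVersionAtLeast(unsigned Platform, unsigned Major,
                                          unsigned Minor, unsigned Subminor);

static bool iosAtLeast13() {
  // Matches the four i32 arguments assembled by EmitArgs above.
  return __isPlatformVersionAtLeast(/*PLATFORM_IOS=*/2, 13, 0, 0) != 0;
}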
+
llvm::Value *
-CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
- assert(Args.size() == 3 && "Expected 3 argument here!");
+CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
+ // Darwin uses the new __isPlatformVersionAtLeast family of routines.
+ if (CGM.getTarget().getTriple().isOSDarwin())
+ return emitIsPlatformVersionAtLeast(*this, Version);
if (!CGM.IsOSVersionAtLeastFn) {
llvm::FunctionType *FTy =
@@ -3752,18 +3896,51 @@ CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
}
+ Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
+ llvm::Value *Args[] = {
+ llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
+ llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0),
+ llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0),
+ };
+
llvm::Value *CallRes =
EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);
return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}
+static bool isFoundationNeededForDarwinAvailabilityCheck(
+ const llvm::Triple &TT, const VersionTuple &TargetVersion) {
+ VersionTuple FoundationDroppedInVersion;
+ switch (TT.getOS()) {
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ FoundationDroppedInVersion = VersionTuple(/*Major=*/13);
+ break;
+ case llvm::Triple::WatchOS:
+ FoundationDroppedInVersion = VersionTuple(/*Major=*/6);
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15);
+ break;
+ default:
+ llvm_unreachable("Unexpected OS");
+ }
+ return TargetVersion < FoundationDroppedInVersion;
+}
+
void CodeGenModule::emitAtAvailableLinkGuard() {
- if (!IsOSVersionAtLeastFn)
+ if (!IsPlatformVersionAtLeastFn)
return;
// @available requires CoreFoundation only on Darwin.
if (!Target.getTriple().isOSDarwin())
return;
+ // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or
+ // watchOS 6+.
+ if (!isFoundationNeededForDarwinAvailabilityCheck(
+ Target.getTriple(), Target.getPlatformMinVersion()))
+ return;
// Add -framework CoreFoundation to the linker commands. We still want to
// emit the core foundation reference down below because otherwise if
// CoreFoundation is not used in the code, the linker won't link the
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index bb9c494ae68e..9825d7bca18c 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -42,16 +42,6 @@ using namespace CodeGen;
namespace {
-std::string SymbolNameForMethod( StringRef ClassName,
- StringRef CategoryName, const Selector MethodName,
- bool isClassMethod) {
- std::string MethodNameColonStripped = MethodName.getAsString();
- std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
- ':', '_');
- return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
- CategoryName + "_" + MethodNameColonStripped).str();
-}
-
/// Class that lazily initialises the runtime function. Avoids inserting the
/// types and the function declaration into a module if they're not used, and
/// avoids constructing the type more than once if it's used more than once.
@@ -1197,8 +1187,11 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
llvm::Constant *GenerateCategoryProtocolList(const ObjCCategoryDecl *OCD)
override {
- SmallVector<llvm::Constant*, 16> Protocols;
- for (const auto *PI : OCD->getReferencedProtocols())
+ const auto &ReferencedProtocols = OCD->getReferencedProtocols();
+ auto RuntimeProtocols = GetRuntimeProtocolList(ReferencedProtocols.begin(),
+ ReferencedProtocols.end());
+ SmallVector<llvm::Constant *, 16> Protocols;
+ for (const auto *PI : RuntimeProtocols)
Protocols.push_back(
llvm::ConstantExpr::getBitCast(GenerateProtocolRef(PI),
ProtocolPtrTy));
@@ -1381,7 +1374,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
SmallVector<llvm::Constant*, 16> Protocols;
- for (const auto *PI : PD->protocols())
+ auto RuntimeProtocols =
+ GetRuntimeProtocolList(PD->protocol_begin(), PD->protocol_end());
+ for (const auto *PI : RuntimeProtocols)
Protocols.push_back(
llvm::ConstantExpr::getBitCast(GenerateProtocolRef(PI),
ProtocolPtrTy));
@@ -1920,8 +1915,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// struct objc_class *sibling_class
classFields.addNullPointer(PtrTy);
// struct objc_protocol_list *protocols;
- SmallVector<llvm::Constant*, 16> Protocols;
- for (const auto *I : classDecl->protocols())
+ auto RuntimeProtocols = GetRuntimeProtocolList(classDecl->protocol_begin(),
+ classDecl->protocol_end());
+ SmallVector<llvm::Constant *, 16> Protocols;
+ for (const auto *I : RuntimeProtocols)
Protocols.push_back(
llvm::ConstantExpr::getBitCast(GenerateProtocolRef(I),
ProtocolPtrTy));
@@ -2823,9 +2820,7 @@ GenerateMethodList(StringRef ClassName,
ASTContext &Context = CGM.getContext();
for (const auto *OMD : Methods) {
llvm::Constant *FnPtr =
- TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
- OMD->getSelector(),
- isClassMethodList));
+ TheModule.getFunction(getSymbolNameForMethod(OMD));
assert(FnPtr && "Can't generate metadata for method that doesn't exist");
auto Method = MethodArray.beginStruct(ObjCMethodTy);
if (isV2ABI) {
@@ -3088,6 +3083,9 @@ CGObjCGNU::GenerateEmptyProtocol(StringRef ProtocolName) {
}
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ if (PD->isNonRuntimeProtocol())
+ return;
+
std::string ProtocolName = PD->getNameAsString();
// Use the protocol definition, if there is one.
@@ -3240,8 +3238,11 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
llvm::Constant *CGObjCGNU::GenerateCategoryProtocolList(const
ObjCCategoryDecl *OCD) {
+ const auto &RefPro = OCD->getReferencedProtocols();
+ const auto RuntimeProtos =
+ GetRuntimeProtocolList(RefPro.begin(), RefPro.end());
SmallVector<std::string, 16> Protocols;
- for (const auto *PD : OCD->getReferencedProtocols())
+ for (const auto *PD : RuntimeProtos)
Protocols.push_back(PD->getNameAsString());
return GenerateProtocolList(Protocols);
}
@@ -3511,24 +3512,14 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ClassMethods.insert(ClassMethods.begin(), OID->classmeth_begin(),
OID->classmeth_end());
- // Collect the same information about synthesized properties, which don't
- // show up in the instance method lists.
- for (auto *propertyImpl : OID->property_impls())
- if (propertyImpl->getPropertyImplementation() ==
- ObjCPropertyImplDecl::Synthesize) {
- auto addPropertyMethod = [&](const ObjCMethodDecl *accessor) {
- if (accessor)
- InstanceMethods.push_back(accessor);
- };
- addPropertyMethod(propertyImpl->getGetterMethodDecl());
- addPropertyMethod(propertyImpl->getSetterMethodDecl());
- }
-
llvm::Constant *Properties = GeneratePropertyList(OID, ClassDecl);
// Collect the names of referenced protocols
+ auto RefProtocols = ClassDecl->protocols();
+ auto RuntimeProtocols =
+ GetRuntimeProtocolList(RefProtocols.begin(), RefProtocols.end());
SmallVector<std::string, 16> Protocols;
- for (const auto *I : ClassDecl->protocols())
+ for (const auto *I : RuntimeProtocols)
Protocols.push_back(I->getNameAsString());
// Get the superclass pointer.
@@ -3873,18 +3864,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
- const ObjCCategoryImplDecl *OCD =
- dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
- StringRef CategoryName = OCD ? OCD->getName() : "";
- StringRef ClassName = CD->getName();
- Selector MethodName = OMD->getSelector();
- bool isClassMethod = !OMD->isInstanceMethod();
-
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
- std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
- MethodName, isClassMethod);
+ std::string FunctionName = getSymbolNameForMethod(OMD);
llvm::Function *Method
= llvm::Function::Create(MethodTy,
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 1d0379afb4b5..4c4a316308ce 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/CodeGenOptions.h"
@@ -31,6 +32,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/UniqueVector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -924,13 +926,6 @@ protected:
llvm::StringMap<llvm::GlobalVariable *> NSConstantStringMap;
- /// GetNameForMethod - Return a name for the given method.
- /// \param[out] NameOut - The return value.
- void GetNameForMethod(const ObjCMethodDecl *OMD,
- const ObjCContainerDecl *CD,
- SmallVectorImpl<char> &NameOut,
- bool ignoreCategoryNamespace = false);
-
/// GetMethodVarName - Return a unique constant for the given
/// selector's name. The return value has type char *.
llvm::Constant *GetMethodVarName(Selector Sel);
@@ -1085,8 +1080,8 @@ protected:
void EmitImageInfo();
public:
- CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
- CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) {}
bool isNonFragileABI() const {
return ObjCABI == 2;
@@ -1770,6 +1765,24 @@ struct NullReturnState {
assert(RV.isScalar() &&
"NullReturnState::complete - arg not on object");
CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
+ } else {
+ QualType QT = ParamDecl->getType();
+ auto *RT = QT->getAs<RecordType>();
+ if (RT && RT->getDecl()->isParamDestroyedInCallee()) {
+ RValue RV = I->getRValue(CGF);
+ QualType::DestructionKind DtorKind = QT.isDestructedType();
+ switch (DtorKind) {
+ case QualType::DK_cxx_destructor:
+ CGF.destroyCXXObject(CGF, RV.getAggregateAddress(), QT);
+ break;
+ case QualType::DK_nontrivial_c_struct:
+ CGF.destroyNonTrivialCStruct(CGF, RV.getAggregateAddress(), QT);
+ break;
+ default:
+ llvm_unreachable("unexpected dtor kind");
+ break;
+ }
+ }
}
}
}
@@ -1787,7 +1800,8 @@ struct NullReturnState {
// If we've got a scalar return, build a phi.
if (result.isScalar()) {
// Derive the null-initialization value.
- llvm::Constant *null = CGF.CGM.EmitNullConstant(resultType);
+ llvm::Value *null =
+ CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(resultType), resultType);
// If no join is necessary, just flow out.
if (!contBB) return RValue::get(null);
@@ -2246,7 +2260,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
// Emit a null-check if there's a consumed argument other than the receiver.
if (!RequiresNullCheck && CGM.getLangOpts().ObjCAutoRefCount && Method) {
for (const auto *ParamDecl : Method->parameters()) {
- if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (ParamDecl->isDestroyedInCallee()) {
RequiresNullCheck = true;
break;
}
@@ -3202,7 +3216,8 @@ CGObjCMac::EmitProtocolList(Twine name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
// Just return null for empty protocol lists
- if (begin == end)
+ auto PDs = GetRuntimeProtocolList(begin, end);
+ if (PDs.empty())
return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
ConstantInitBuilder builder(CGM);
@@ -3215,9 +3230,9 @@ CGObjCMac::EmitProtocolList(Twine name,
auto countSlot = values.addPlaceholder();
auto refsArray = values.beginArray(ObjCTypes.ProtocolPtrTy);
- for (; begin != end; ++begin) {
- refsArray.add(GetProtocolRef(*begin));
- }
+ for (const auto *Proto : PDs)
+ refsArray.add(GetProtocolRef(Proto));
+
auto count = refsArray.size();
// This list is null terminated.
@@ -4007,15 +4022,14 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
if (OMD->isDirectMethod()) {
Method = GenerateDirectMethod(OMD, CD);
} else {
- SmallString<256> Name;
- GetNameForMethod(OMD, CD, Name);
+ auto Name = getSymbolNameForMethod(OMD);
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
Method =
llvm::Function::Create(MethodTy, llvm::GlobalValue::InternalLinkage,
- Name.str(), &CGM.getModule());
+ Name, &CGM.getModule());
}
MethodDefinitions.insert(std::make_pair(OMD, Method));
@@ -4060,11 +4074,10 @@ CGObjCCommonMac::GenerateDirectMethod(const ObjCMethodDecl *OMD,
// Replace the cached function in the map.
I->second = Fn;
} else {
- SmallString<256> Name;
- GetNameForMethod(OMD, CD, Name, /*ignoreCategoryNamespace*/ true);
+ auto Name = getSymbolNameForMethod(OMD, /*include category*/ false);
Fn = llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
- Name.str(), &CGM.getModule());
+ Name, &CGM.getModule());
DirectMethodDefinitions.insert(std::make_pair(COMD, Fn));
}
@@ -5106,9 +5119,10 @@ std::string CGObjCCommonMac::GetSectionName(StringRef Section,
"expected the name to begin with __");
return ("." + Section.substr(2) + "$B").str();
case llvm::Triple::Wasm:
+ case llvm::Triple::GOFF:
case llvm::Triple::XCOFF:
llvm::report_fatal_error(
- "Objective-C support is unimplemented for object file format.");
+ "Objective-C support is unimplemented for object file format");
}
llvm_unreachable("Unhandled llvm::Triple::ObjectFormatType enum");
@@ -5714,21 +5728,6 @@ CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
}
-void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
- const ObjCContainerDecl *CD,
- SmallVectorImpl<char> &Name,
- bool ignoreCategoryNamespace) {
- llvm::raw_svector_ostream OS(Name);
- assert (CD && "Missing container decl in GetNameForMethod");
- OS << '\01' << (D->isInstanceMethod() ? '-' : '+')
- << '[' << CD->getName();
- if (!ignoreCategoryNamespace)
- if (const ObjCCategoryImplDecl *CID =
- dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
- OS << '(' << *CID << ')';
- OS << ' ' << D->getSelector().getAsString() << ']';
-}
-
void CGObjCMac::FinishModule() {
EmitModuleInfo();
@@ -6670,7 +6669,8 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
// This routine is called for @protocol only. So, we must build definition
// of protocol's meta-data (not a reference to it!)
- //
+ assert(!PD->isNonRuntimeProtocol() &&
+ "attempting to get a protocol ref to a static protocol.");
llvm::Constant *Init =
llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
ObjCTypes.getExternalProtocolPtrTy());
@@ -7027,6 +7027,8 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
const ObjCProtocolDecl *PD) {
llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+ assert(!PD->isNonRuntimeProtocol() &&
+ "attempting to GetOrEmit a non-runtime protocol");
if (!Entry) {
// We use the initializer as a marker of whether this is a forward
// reference or not. At module finalization we add the empty
@@ -7170,10 +7172,20 @@ llvm::Constant *
CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
+ // Just return null for empty protocol lists
+ auto Protocols = GetRuntimeProtocolList(begin, end);
+ if (Protocols.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
SmallVector<llvm::Constant *, 16> ProtocolRefs;
+ ProtocolRefs.reserve(Protocols.size());
- // Just return null for empty protocol lists
- if (begin == end)
+ for (const auto *PD : Protocols)
+ ProtocolRefs.push_back(GetProtocolRef(PD));
+
+  // If all of the protocols in the protocol list are objc_non_runtime_protocol,
+  // just return null.
+ if (ProtocolRefs.size() == 0)
return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
// FIXME: We shouldn't need to do this lookup here, should we?
@@ -7190,8 +7202,8 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
// A null-terminated array of protocols.
auto array = values.beginArray(ObjCTypes.ProtocolnfABIPtrTy);
- for (; begin != end; ++begin)
- array.add(GetProtocolRef(*begin)); // Implemented???
+ for (auto const &proto : ProtocolRefs)
+ array.add(proto);
auto count = array.size();
array.addNullPointer(ObjCTypes.ProtocolnfABIPtrTy);
@@ -7357,7 +7369,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
bool requiresnullCheck = false;
if (CGM.getLangOpts().ObjCAutoRefCount && method)
for (const auto *ParamDecl : method->parameters()) {
- if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (ParamDecl->isDestroyedInCallee()) {
if (!nullReturn.NullBB)
nullReturn.init(CGF, arg0);
requiresnullCheck = true;
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index 39efe040302d..9bf4d83f9bc4 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -390,3 +390,13 @@ clang::CodeGen::emitObjCProtocolObject(CodeGenModule &CGM,
const ObjCProtocolDecl *protocol) {
return CGM.getObjCRuntime().GetOrEmitProtocol(protocol);
}
+
+std::string CGObjCRuntime::getSymbolNameForMethod(const ObjCMethodDecl *OMD,
+ bool includeCategoryName) {
+ std::string buffer;
+ llvm::raw_string_ostream out(buffer);
+ CGM.getCXXABI().getMangleContext().mangleObjCMethodName(OMD, out,
+ /*includePrefixByte=*/true,
+ includeCategoryName);
+ return buffer;
+}
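
For a concrete sense of the names this produces: mangleObjCMethodName writes the same '\01±[Class(Category) selector]' scheme that the deleted per-runtime helpers assembled by hand. A minimal standalone sketch of that scheme follows; the function and parameter names are illustrative only, not the actual clang entry point.

// Sketch: mirrors the symbol scheme built by the removed GetNameForMethod /
// SymbolNameForMethod helpers; not the real MangleContext implementation.
#include <string>

std::string sketchObjCMethodSymbol(bool isInstanceMethod,
                                   const std::string &className,
                                   const std::string &categoryName,
                                   const std::string &selector,
                                   bool includeCategoryName) {
  std::string out;
  out += '\01';                      // prefix byte suppresses target mangling
  out += isInstanceMethod ? '-' : '+';
  out += '[' + className;
  if (includeCategoryName && !categoryName.empty())
    out += '(' + categoryName + ')';
  out += ' ' + selector + ']';
  return out;                        // e.g. "\01-[MyClass(Extras) doWork:]"
}
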
diff --git a/clang/lib/CodeGen/CGObjCRuntime.h b/clang/lib/CodeGen/CGObjCRuntime.h
index a2c189585f7b..f56101df77b6 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/clang/lib/CodeGen/CGObjCRuntime.h
@@ -20,6 +20,7 @@
#include "CGValue.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/IdentifierTable.h" // Selector
+#include "llvm/ADT/UniqueVector.h"
namespace llvm {
class Constant;
@@ -115,6 +116,9 @@ protected:
public:
virtual ~CGObjCRuntime();
+ std::string getSymbolNameForMethod(const ObjCMethodDecl *method,
+ bool includeCategoryName = true);
+
/// Generate the function required to register all Objective-C components in
/// this compilation unit with the runtime library.
virtual llvm::Function *ModuleInitFunction() = 0;
@@ -202,6 +206,16 @@ public:
const CallArgList &CallArgs,
const ObjCMethodDecl *Method = nullptr) = 0;
+  /// Walk the list of protocol references from a class, category, or
+  /// protocol to traverse the DAG formed from its inheritance hierarchy. Find
+  /// the list of protocols that ends each walk at either a runtime protocol
+  /// or a non-runtime protocol with no parents. For the common case of just a
+  /// list of standard runtime protocols, this simply returns the list that
+  /// was passed in.
+ std::vector<const ObjCProtocolDecl *>
+ GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
/// Emit the code to return the named protocol as an object, as in a
/// \@protocol expression.
virtual llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
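
The walk described in the comment above is easy to picture; here is a hedged sketch of one way to implement it, assuming isNonRuntimeProtocol() and the protocols() range behave as used elsewhere in this diff — the committed implementation may differ.

// Sketch: each path through the inheritance DAG ends at the first runtime
// protocol; non-runtime protocols contribute their runtime ancestors instead.
static void collectRuntimeProtocols(
    const ObjCProtocolDecl *PD,
    llvm::SmallPtrSetImpl<const ObjCProtocolDecl *> &Visited,
    std::vector<const ObjCProtocolDecl *> &Out) {
  if (!Visited.insert(PD).second)
    return;                    // already handled via another path in the DAG
  if (!PD->isNonRuntimeProtocol()) {
    Out.push_back(PD);         // runtime protocol: this walk stops here
    return;
  }
  for (const ObjCProtocolDecl *Parent : PD->protocols())
    collectRuntimeProtocols(Parent, Visited, Out);
}
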
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 43cbe9c720ea..57cc2d60e2af 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -29,7 +29,6 @@
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
-#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
@@ -181,7 +180,7 @@ public:
UntiedCodeGen(CGF);
CodeGenFunction::JumpDest CurPoint =
CGF.getJumpDestInCurrentScope(".untied.next.");
- CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
+ CGF.EmitBranch(CGF.ReturnBlock.getBlock());
CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
CGF.Builder.GetInsertBlock());
@@ -886,8 +885,11 @@ void ReductionCodeGen::emitInitialization(
SharedType, SharedAddresses[N].first.getBaseInfo(),
CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
+ if (DRD && DRD->getInitializer())
+ (void)DefaultInit(CGF);
emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
} else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
+ (void)DefaultInit(CGF);
emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
PrivateAddr, SharedLVal.getAddress(CGF),
SharedLVal.getType());
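
The DRD branches above handle user-defined reductions; when the declare reduction carries an explicit initializer, the private copy is now default-initialized before that initializer runs. A minimal input that reaches this path (illustrative only):

// Hypothetical example: DRD->getInitializer() is the initializer clause below.
struct Acc { int v; };
#pragma omp declare reduction(merge : Acc : omp_out.v += omp_in.v) \
    initializer(omp_priv = Acc{0})
int sumAll() {
  Acc a{0};
#pragma omp parallel reduction(merge : a)
  { a.v += 1; } // each thread's private 'a' starts from the initializer value
  return a.v;
}
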
@@ -1061,23 +1063,6 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator)
: CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
- ASTContext &C = CGM.getContext();
- RecordDecl *RD = C.buildImplicitRecord("ident_t");
- QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
- RD->startDefinition();
- // reserved_1
- addFieldToRecordDecl(C, RD, KmpInt32Ty);
- // flags
- addFieldToRecordDecl(C, RD, KmpInt32Ty);
- // reserved_2
- addFieldToRecordDecl(C, RD, KmpInt32Ty);
- // reserved_3
- addFieldToRecordDecl(C, RD, KmpInt32Ty);
- // psource
- addFieldToRecordDecl(C, RD, C.VoidPtrTy);
- RD->completeDefinition();
- IdentQTy = C.getRecordType(RD);
- IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
// Initialize Types used in OpenMPIRBuilder from OMPKinds.def
@@ -1394,39 +1379,6 @@ createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
Fields.finishAndAddTo(Parent);
}
-Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
- CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
- unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
- FlagsTy FlagsKey(Flags, Reserved2Flags);
- llvm::Value *Entry = OpenMPDefaultLocMap.lookup(FlagsKey);
- if (!Entry) {
- if (!DefaultOpenMPPSource) {
- // Initialize default location for psource field of ident_t structure of
- // all ident_t objects. Format is ";file;function;line;column;;".
- // Taken from
- // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp_str.cpp
- DefaultOpenMPPSource =
- CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
- DefaultOpenMPPSource =
- llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
- }
-
- llvm::Constant *Data[] = {
- llvm::ConstantInt::getNullValue(CGM.Int32Ty),
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- llvm::ConstantInt::get(CGM.Int32Ty, Reserved2Flags),
- llvm::ConstantInt::getNullValue(CGM.Int32Ty), DefaultOpenMPPSource};
- llvm::GlobalValue *DefaultOpenMPLocation =
- createGlobalStruct(CGM, IdentQTy, isDefaultLocationConstant(), Data, "",
- llvm::GlobalValue::PrivateLinkage);
- DefaultOpenMPLocation->setUnnamedAddr(
- llvm::GlobalValue::UnnamedAddr::Global);
-
- OpenMPDefaultLocMap[FlagsKey] = Entry = DefaultOpenMPLocation;
- }
- return Address(Entry, Align);
-}
-
void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
@@ -1452,70 +1404,55 @@ void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
}
}
+static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ SmallString<128> &Buffer) {
+ llvm::raw_svector_ostream OS(Buffer);
+ // Build debug location
+ PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
+ OS << ";" << PLoc.getFilename() << ";";
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
+ OS << FD->getQualifiedNameAsString();
+ OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
+ return OS.str();
+}
+
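
This is the same ";file;function;line;column;;" layout that the removed default-location code documented for the psource field. A quick standalone sketch of the format, with illustrative values:

#include <cstdio>

int main() {
  char buf[128];
  // Mirrors getIdentStringFromSourceLocation for a call at file.c:10:3 in foo().
  std::snprintf(buf, sizeof buf, ";%s;%s;%u;%u;;", "file.c", "foo", 10u, 3u);
  // buf == ";file.c;foo;10;3;;"; with no debug info the runtime sees the
  // default ";unknown;unknown;0;0;;" instead.
  return 0;
}
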
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned Flags) {
- Flags |= OMP_IDENT_KMPC;
- // If no debug info is generated - return global default location.
+ llvm::Constant *SrcLocStr;
if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
- Loc.isInvalid())
- return getOrCreateDefaultLocation(Flags).getPointer();
-
- assert(CGF.CurFn && "No function in current CodeGenFunction.");
-
- CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
- Address LocValue = Address::invalid();
- auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
- if (I != OpenMPLocThreadIDMap.end())
- LocValue = Address(I->second.DebugLoc, Align);
-
- // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
- // GetOpenMPThreadID was called before this routine.
- if (!LocValue.isValid()) {
- // Generate "ident_t .kmpc_loc.addr;"
- Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
- auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
- Elem.second.DebugLoc = AI.getPointer();
- LocValue = AI;
-
- if (!Elem.second.ServiceInsertPt)
- setLocThreadIdInsertPt(CGF);
- CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
- CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
- CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
- CGF.getTypeSize(IdentQTy));
- }
-
- // char **psource = &.kmpc_loc_<flags>.addr.psource;
- LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
- auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
- LValue PSource =
- CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));
-
- llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
- if (OMPDebugLoc == nullptr) {
- SmallString<128> Buffer2;
- llvm::raw_svector_ostream OS2(Buffer2);
- // Build debug location
- PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
- OS2 << ";" << PLoc.getFilename() << ";";
+ Loc.isInvalid()) {
+ SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
+ } else {
+ std::string FunctionName = "";
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
- OS2 << FD->getQualifiedNameAsString();
- OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
- OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
- OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
+ FunctionName = FD->getQualifiedNameAsString();
+ PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
+ const char *FileName = PLoc.getFilename();
+ unsigned Line = PLoc.getLine();
+ unsigned Column = PLoc.getColumn();
+ SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName.c_str(), FileName,
+ Line, Column);
}
- // *psource = ";<File>;<Function>;<Line>;<Column>;;";
- CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);
-
- // Our callers always pass this to a runtime function, so for
- // convenience, go ahead and return a naked pointer.
- return LocValue.getPointer();
+ unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
+ return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags),
+ Reserved2Flags);
}
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
SourceLocation Loc) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
+  // If the OpenMPIRBuilder is used, we need to use it for all thread id calls,
+  // as the clang invariants used below might be broken.
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ SmallString<128> Buffer;
+ OMPBuilder.updateToLocation(CGF.Builder.saveIP());
+ auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
+ getIdentStringFromSourceLocation(CGF, Loc, Buffer));
+ return OMPBuilder.getOrCreateThreadID(
+ OMPBuilder.getOrCreateIdent(SrcLocStr));
+ }
llvm::Value *ThreadID = nullptr;
// Check whether we've already cached a load of the thread id in this
@@ -1589,10 +1526,11 @@ void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
FunctionUDMMap.erase(I);
}
LastprivateConditionalToTypes.erase(CGF.CurFn);
+ FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
- return IdentTy->getPointerTo();
+ return OMPBuilder.IdentPtr;
}
llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
@@ -2160,6 +2098,14 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
+
+  // Ensure we do not inline the function. This is trivially true for the ones
+  // passed to __kmpc_fork_call, but the ones called in serialized regions
+  // could be inlined. This is not perfect, but it is closer to the invariant
+  // we want, namely, that every data environment starts with a new function.
+  // TODO: We should pass the if condition to the runtime function and do the
+  // handling there. Much cleaner code.
+ OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
// __kmpc_end_serialized_parallel(&Loc, GTid);
@@ -2322,7 +2268,7 @@ void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
if (!CGF.HaveInsertPoint())
return;
if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
- OMPBuilder.CreateTaskyield(CGF.Builder);
+ OMPBuilder.createTaskyield(CGF.Builder);
} else {
// Build call __kmpc_omp_taskyield(loc, thread_id, 0);
llvm::Value *Args[] = {
@@ -2577,7 +2523,7 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
- CGF.Builder.restoreIP(OMPBuilder.CreateBarrier(
+ CGF.Builder.restoreIP(OMPBuilder.createBarrier(
CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
return;
}
@@ -2939,7 +2885,7 @@ void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
SourceLocation Loc, llvm::AtomicOrdering AO) {
if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
- OMPBuilder.CreateFlush(CGF.Builder);
+ OMPBuilder.createFlush(CGF.Builder);
} else {
if (!CGF.HaveInsertPoint())
return;
@@ -3003,20 +2949,23 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
// If we are emitting code for a target, the entry is already initialized,
// only has to be registered.
if (CGM.getLangOpts().OpenMPIsDevice) {
- if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error,
- "Unable to find target region on line '%0' in the device code.");
- CGM.getDiags().Report(DiagID) << LineNum;
- return;
- }
+ // This could happen if the device compilation is invoked standalone.
+ if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum))
+ initializeTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
+ OffloadingEntriesNum);
auto &Entry =
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
- assert(Entry.isValid() && "Entry not initialized!");
Entry.setAddress(Addr);
Entry.setID(ID);
Entry.setFlags(Flags);
} else {
+ if (Flags ==
+ OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion &&
+ hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
+ /*IgnoreAddressId*/ true))
+ return;
+ assert(!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
+ "Target region entry already registered!");
OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
++OffloadingEntriesNum;
@@ -3024,8 +2973,8 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
}
bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
- unsigned DeviceID, unsigned FileID, StringRef ParentName,
- unsigned LineNum) const {
+ unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
+ bool IgnoreAddressId) const {
auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
if (PerDevice == OffloadEntriesTargetRegion.end())
return false;
@@ -3039,7 +2988,8 @@ bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
if (PerLine == PerParentName->second.end())
return false;
// Fail if this entry is already registered.
- if (PerLine->second.getAddress() || PerLine->second.getID())
+ if (!IgnoreAddressId &&
+ (PerLine->second.getAddress() || PerLine->second.getID()))
return false;
return true;
}
@@ -3071,9 +3021,10 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage) {
if (CGM.getLangOpts().OpenMPIsDevice) {
+ // This could happen if the device compilation is invoked standalone.
+ if (!hasDeviceGlobalVarEntryInfo(VarName))
+ initializeDeviceGlobalVarEntryInfo(VarName, Flags, OffloadingEntriesNum);
auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
- assert(Entry.isValid() && Entry.getFlags() == Flags &&
- "Entry not initialized!");
assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
"Resetting with the new address.");
if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
@@ -3129,11 +3080,12 @@ void CGOpenMPRuntime::createOffloadEntry(
llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
- llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
- llvm::ConstantInt::get(CGM.SizeTy, Size),
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- llvm::ConstantInt::get(CGM.Int32Ty, 0)};
+ llvm::Constant *Data[] = {
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(ID, CGM.VoidPtrTy),
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, CGM.Int8PtrTy),
+ llvm::ConstantInt::get(CGM.SizeTy, Size),
+ llvm::ConstantInt::get(CGM.Int32Ty, Flags),
+ llvm::ConstantInt::get(CGM.Int32Ty, 0)};
std::string EntryName = getName({"omp_offloading", "entry", ""});
llvm::GlobalVariable *Entry = createGlobalStruct(
CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
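
The five constants fill one offload entry record. As far as can be inferred from this initializer (and libomptarget's headers), the shape is roughly the following; the field names here are assumptions, not quoted from the patch:

#include <cstddef>
#include <cstdint>

// Assumed mirror of the type behind getTgtOffloadEntryQTy().
struct __tgt_offload_entry {
  void *addr;            // ID: target region pointer or global address
  char *name;            // Str: entry name string
  std::size_t size;      // Size: 0 for functions, byte size for globals
  std::int32_t flags;    // Flags: entry kind
  std::int32_t reserved; // always 0 in this diff
};
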
@@ -3433,14 +3385,29 @@ struct PrivateHelpersTy {
const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
: OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
PrivateElemInit(PrivateElemInit) {}
+ PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
const Expr *OriginalRef = nullptr;
const VarDecl *Original = nullptr;
const VarDecl *PrivateCopy = nullptr;
const VarDecl *PrivateElemInit = nullptr;
+ bool isLocalPrivate() const {
+ return !OriginalRef && !PrivateCopy && !PrivateElemInit;
+ }
};
typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
} // anonymous namespace
+static bool isAllocatableDecl(const VarDecl *VD) {
+ const VarDecl *CVD = VD->getCanonicalDecl();
+ if (!CVD->hasAttr<OMPAllocateDeclAttr>())
+ return false;
+ const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
+ // Use the default allocation.
+ return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
+ AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
+ !AA->getAllocator());
+}
+
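
Put differently, only an allocate directive that names a non-default allocator makes a declaration "allocatable" here. A minimal illustration, using an allocator name from the standard omp.h set:

#include <omp.h>

int plain_copy;
#pragma omp allocate(plain_copy) // default allocator: isAllocatableDecl -> false

int hbw_copy;
// Named allocator: isAllocatableDecl -> true, so the private copy is held
// behind an extra pointer in the generated privates record (see below).
#pragma omp allocate(hbw_copy) allocator(omp_high_bw_mem_alloc)
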
static RecordDecl *
createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
if (!Privates.empty()) {
@@ -3453,6 +3420,14 @@ createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
for (const auto &Pair : Privates) {
const VarDecl *VD = Pair.second.Original;
QualType Type = VD->getType().getNonReferenceType();
+ // If the private variable is a local variable with lvalue ref type,
+ // allocate the pointer instead of the pointee type.
+ if (Pair.second.isLocalPrivate()) {
+ if (VD->getType()->isLValueReferenceType())
+ Type = C.getPointerType(Type);
+ if (isAllocatableDecl(VD))
+ Type = C.getPointerType(Type);
+ }
FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
if (VD->hasAttrs()) {
for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
@@ -3706,10 +3681,7 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
/// \endcode
static llvm::Value *
emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
- ArrayRef<const Expr *> PrivateVars,
- ArrayRef<const Expr *> FirstprivateVars,
- ArrayRef<const Expr *> LastprivateVars,
- QualType PrivatesQTy,
+ const OMPTaskDataTy &Data, QualType PrivatesQTy,
ArrayRef<PrivateDataTy> Privates) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
@@ -3718,9 +3690,9 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
C.getPointerType(PrivatesQTy).withConst().withRestrict(),
ImplicitParamDecl::Other);
Args.push_back(&TaskPrivatesArg);
- llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
+ llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
unsigned Counter = 1;
- for (const Expr *E : PrivateVars) {
+ for (const Expr *E : Data.PrivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(C.getPointerType(E->getType()))
@@ -3731,7 +3703,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
PrivateVarsPos[VD] = Counter;
++Counter;
}
- for (const Expr *E : FirstprivateVars) {
+ for (const Expr *E : Data.FirstprivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(C.getPointerType(E->getType()))
@@ -3742,7 +3714,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
PrivateVarsPos[VD] = Counter;
++Counter;
}
- for (const Expr *E : LastprivateVars) {
+ for (const Expr *E : Data.LastprivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(C.getPointerType(E->getType()))
@@ -3753,6 +3725,19 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
PrivateVarsPos[VD] = Counter;
++Counter;
}
+ for (const VarDecl *VD : Data.PrivateLocals) {
+ QualType Ty = VD->getType().getNonReferenceType();
+ if (VD->getType()->isLValueReferenceType())
+ Ty = C.getPointerType(Ty);
+ if (isAllocatableDecl(VD))
+ Ty = C.getPointerType(Ty);
+ Args.push_back(ImplicitParamDecl::Create(
+ C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
+ ImplicitParamDecl::Other));
+ PrivateVarsPos[VD] = Counter;
+ ++Counter;
+ }
const auto &TaskPrivatesMapFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *TaskPrivatesMapTy =
@@ -3813,9 +3798,9 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
bool IsTargetTask =
isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
isOpenMPTargetExecutionDirective(D.getDirectiveKind());
- // For target-based directives skip 3 firstprivate arrays BasePointersArray,
- // PointersArray and SizesArray. The original variables for these arrays are
- // not captured and we get their addresses explicitly.
+ // For target-based directives skip 4 firstprivate arrays BasePointersArray,
+ // PointersArray, SizesArray, and MappersArray. The original variables for
+ // these arrays are not captured and we get their addresses explicitly.
if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
(IsTargetTask && KmpTaskSharedsPtr.isValid())) {
SrcBase = CGF.MakeAddrLValue(
@@ -3825,6 +3810,11 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
}
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
for (const PrivateDataTy &Pair : Privates) {
+ // Do not initialize private locals.
+ if (Pair.second.isLocalPrivate()) {
+ ++FI;
+ continue;
+ }
const VarDecl *VD = Pair.second.PrivateCopy;
const Expr *Init = VD->getAnyInitializer();
if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
@@ -3833,7 +3823,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
const VarDecl *OriginalVD = Pair.second.Original;
// Check if the variable is the target-based BasePointersArray,
- // PointersArray or SizesArray.
+ // PointersArray, SizesArray, or MappersArray.
LValue SharedRefLValue;
QualType Type = PrivateLValue.getType();
const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
@@ -3915,6 +3905,8 @@ static bool checkInitIsRequired(CodeGenFunction &CGF,
ArrayRef<PrivateDataTy> Privates) {
bool InitRequired = false;
for (const PrivateDataTy &Pair : Privates) {
+ if (Pair.second.isLocalPrivate())
+ continue;
const VarDecl *VD = Pair.second.PrivateCopy;
const Expr *Init = VD->getAnyInitializer();
InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
@@ -4008,16 +4000,16 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
/// Checks if destructor function is required to be generated.
/// \return true if cleanups are required, false otherwise.
static bool
-checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
- bool NeedsCleanup = false;
- auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
- const auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
- for (const FieldDecl *FD : PrivateRD->fields()) {
- NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
- if (NeedsCleanup)
- break;
+checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
+ ArrayRef<PrivateDataTy> Privates) {
+ for (const PrivateDataTy &P : Privates) {
+ if (P.second.isLocalPrivate())
+ continue;
+ QualType Ty = P.second.Original->getType().getNonReferenceType();
+ if (Ty.isDestructedType())
+ return true;
}
- return NeedsCleanup;
+ return false;
}
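
So the cleanup decision is now driven by the original variable's type rather than by the synthesized copies. A minimal case that still sets DestructorsFlag (illustrative):

struct Guard {
  ~Guard() {} // user-provided, so Ty.isDestructedType() is true
};

void work(Guard g) {
#pragma omp task firstprivate(g)
  { } // the task's private copy of 'g' must be destroyed when the task ends
}
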
namespace {
@@ -4187,9 +4179,16 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
/*PrivateElemInit=*/nullptr));
++I;
}
- llvm::stable_sort(Privates, [](PrivateDataTy L, PrivateDataTy R) {
- return L.first > R.first;
- });
+ for (const VarDecl *VD : Data.PrivateLocals) {
+ if (isAllocatableDecl(VD))
+ Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
+ else
+ Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
+ }
+ llvm::stable_sort(Privates,
+ [](const PrivateDataTy &L, const PrivateDataTy &R) {
+ return L.first > R.first;
+ });
QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Build type kmp_routine_entry_t (if not built yet).
emitKmpRoutineEntryT(KmpInt32Ty);
@@ -4231,9 +4230,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
std::next(TaskFunction->arg_begin(), 3)->getType();
if (!Privates.empty()) {
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
- TaskPrivatesMap = emitTaskPrivateMappingFunction(
- CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
- FI->getType(), Privates);
+ TaskPrivatesMap =
+ emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
TaskPrivatesMap, TaskPrivatesMapTy);
} else {
@@ -4263,7 +4261,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = Data.Tied ? TiedFlag : 0;
bool NeedsCleanup = false;
if (!Privates.empty()) {
- NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
+ NeedsCleanup =
+ checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
if (NeedsCleanup)
Flags = Flags | DestructorsFlag;
}
@@ -6194,7 +6193,7 @@ void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
return;
if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
- OMPBuilder.CreateTaskwait(CGF.Builder);
+ OMPBuilder.createTaskwait(CGF.Builder);
} else {
// Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
// global_tid);
@@ -6477,6 +6476,8 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
OutlinedFn->setDSOLocal(false);
+ if (CGM.getTriple().isAMDGCN())
+ OutlinedFn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
} else {
std::string Name = getName({EntryFnName, "region_id"});
OutlinedFnID = new llvm::GlobalVariable(
@@ -7040,6 +7041,13 @@ public:
/// Close is a hint to the runtime to allocate memory close to
/// the target device.
OMP_MAP_CLOSE = 0x400,
+ /// 0x800 is reserved for compatibility with XLC.
+ /// Produce a runtime error if the data is not already allocated.
+ OMP_MAP_PRESENT = 0x1000,
+ /// Signal that the runtime library should use args as an array of
+ /// descriptor_dim pointers and use args_size as dims. Used when we have
+  /// non-contiguous list items in the target update directive.
+ OMP_MAP_NON_CONTIG = 0x100000000000,
/// The 16 MSBs of the flags indicate whether the entry is member of some
/// struct/class.
OMP_MAP_MEMBER_OF = 0xffff000000000000,
@@ -7055,6 +7063,23 @@ public:
return Offset;
}
+ /// Class that holds debugging information for a data mapping to be passed to
+ /// the runtime library.
+ class MappingExprInfo {
+ /// The variable declaration used for the data mapping.
+ const ValueDecl *MapDecl = nullptr;
+ /// The original expression used in the map clause, or null if there is
+ /// none.
+ const Expr *MapExpr = nullptr;
+
+ public:
+ MappingExprInfo(const ValueDecl *MapDecl, const Expr *MapExpr = nullptr)
+ : MapDecl(MapDecl), MapExpr(MapExpr) {}
+
+ const ValueDecl *getMapDecl() const { return MapDecl; }
+ const Expr *getMapExpr() const { return MapExpr; }
+ };
+
/// Class that associates information with a base pointer to be passed to the
/// runtime library.
class BasePointerInfo {
@@ -7072,9 +7097,52 @@ public:
void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
};
+ using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
+ using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
+ using MapDimArrayTy = SmallVector<uint64_t, 4>;
+ using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;
+
+ /// This structure contains combined information generated for mappable
+ /// clauses, including base pointers, pointers, sizes, map types, user-defined
+ /// mappers, and non-contiguous information.
+ struct MapCombinedInfoTy {
+ struct StructNonContiguousInfo {
+ bool IsNonContiguous = false;
+ MapDimArrayTy Dims;
+ MapNonContiguousArrayTy Offsets;
+ MapNonContiguousArrayTy Counts;
+ MapNonContiguousArrayTy Strides;
+ };
+ MapExprsArrayTy Exprs;
+ MapBaseValuesArrayTy BasePointers;
+ MapValuesArrayTy Pointers;
+ MapValuesArrayTy Sizes;
+ MapFlagsArrayTy Types;
+ MapMappersArrayTy Mappers;
+ StructNonContiguousInfo NonContigInfo;
+
+ /// Append arrays in \a CurInfo.
+ void append(MapCombinedInfoTy &CurInfo) {
+ Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
+ BasePointers.append(CurInfo.BasePointers.begin(),
+ CurInfo.BasePointers.end());
+ Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
+ Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
+ Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
+ Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
+ NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
+ CurInfo.NonContigInfo.Dims.end());
+ NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
+ CurInfo.NonContigInfo.Offsets.end());
+ NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
+ CurInfo.NonContigInfo.Counts.end());
+ NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
+ CurInfo.NonContigInfo.Strides.end());
+ }
+ };
/// Map between a struct and its lowest & highest elements which have been
/// mapped.
@@ -7086,6 +7154,7 @@ public:
std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
0, Address::invalid()};
Address Base = Address::invalid();
+ bool IsArraySection = false;
};
private:
@@ -7094,19 +7163,26 @@ private:
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
bool ReturnDevicePointer = false;
bool IsImplicit = false;
+ const ValueDecl *Mapper = nullptr;
+ const Expr *VarRef = nullptr;
bool ForDeviceAddr = false;
MapInfo() = default;
MapInfo(
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers, bool ReturnDevicePointer,
- bool IsImplicit, bool ForDeviceAddr = false)
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ bool ReturnDevicePointer, bool IsImplicit,
+ const ValueDecl *Mapper = nullptr, const Expr *VarRef = nullptr,
+ bool ForDeviceAddr = false)
: Components(Components), MapType(MapType), MapModifiers(MapModifiers),
+ MotionModifiers(MotionModifiers),
ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
- ForDeviceAddr(ForDeviceAddr) {}
+ Mapper(Mapper), VarRef(VarRef), ForDeviceAddr(ForDeviceAddr) {}
};
/// If use_device_ptr or use_device_addr is used on a decl which is a struct
@@ -7224,7 +7300,8 @@ private:
/// expression.
OpenMPOffloadMappingFlags getMapTypeBits(
OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
- bool IsImplicit, bool AddPtrFlag, bool AddIsTargetParamFlag) const {
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers, bool IsImplicit,
+ bool AddPtrFlag, bool AddIsTargetParamFlag, bool IsNonContiguous) const {
OpenMPOffloadMappingFlags Bits =
IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
switch (MapType) {
@@ -7260,6 +7337,14 @@ private:
if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_close)
!= MapModifiers.end())
Bits |= OMP_MAP_CLOSE;
+ if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_present)
+ != MapModifiers.end())
+ Bits |= OMP_MAP_PRESENT;
+ if (llvm::find(MotionModifiers, OMPC_MOTION_MODIFIER_present)
+ != MotionModifiers.end())
+ Bits |= OMP_MAP_PRESENT;
+ if (IsNonContiguous)
+ Bits |= OMP_MAP_NON_CONTIG;
return Bits;
}
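
As a rough end-to-end illustration of the two new bits: a present-modified, strided motion clause should come out with both OMP_MAP_PRESENT and OMP_MAP_NON_CONTIG set on its entry. This input is hypothetical, not taken from the patch or its tests:

void refresh(double *a) {
  // 'present' -> OMP_MAP_PRESENT; the strided section a[0:8:2] is
  // non-contiguous -> OMP_MAP_NON_CONTIG.
#pragma omp target update to(present: a[0:8:2])
}
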
@@ -7302,17 +7387,19 @@ private:
return ConstLength.getSExtValue() != 1;
}
- /// Generate the base pointers, section pointers, sizes and map type
- /// bits for the provided map type, map modifier, and expression components.
+ /// Generate the base pointers, section pointers, sizes, map type bits, and
+ /// user-defined mappers (all included in \a CombinedInfo) for the provided
+ /// map type, map or motion modifiers, and expression components.
/// \a IsFirstComponent should be set to true if the provided set of
/// components is the first associated with a capture.
void generateInfoForComponentList(
OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
- MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
- StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
- bool IsImplicit, bool ForDeviceAddr = false,
+ MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct,
+ bool IsFirstComponentList, bool IsImplicit,
+ const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
+ const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr,
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedElements = llvm::None) const {
// The following summarizes what has to be generated for each map and the
@@ -7351,6 +7438,8 @@ private:
// &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
//
// map(p[1:24])
+ // &p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM | PTR_AND_OBJ
+ // in unified shared memory mode or for local pointers
// p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
//
// map(s)
@@ -7486,6 +7575,7 @@ private:
// Track if the map information being generated is the first for a list of
// components.
bool IsExpressionFirstInfo = true;
+ bool FirstPointerInComplexData = false;
Address BP = Address::invalid();
const Expr *AssocExpr = I->getAssociatedExpression();
const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
@@ -7528,10 +7618,15 @@ private:
QualType Ty =
I->getAssociatedDeclaration()->getType().getNonReferenceType();
if (Ty->isAnyPointerType() && std::next(I) != CE) {
- BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
-
- // We do not need to generate individual map information for the
- // pointer, it can be associated with the combined storage.
+      // No need to generate individual map information for the pointer; it
+      // can be associated with the combined storage if shared memory mode is
+      // active or the base declaration is not a global variable.
+ const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
+ if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
+ !VD || VD->hasLocalStorage())
+ BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
+ else
+ FirstPointerInComplexData = true;
++I;
}
}
@@ -7561,14 +7656,31 @@ private:
// whether we are dealing with a member of a declared struct.
const MemberExpr *EncounteredME = nullptr;
+    // Track the total number of dimensions. Start from one for the dummy
+    // dimension.
+ uint64_t DimSize = 1;
+
+ bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
+
for (; I != CE; ++I) {
// If the current component is member of a struct (parent struct) mark it.
if (!EncounteredME) {
EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
// If we encounter a PTR_AND_OBJ entry from now on it should be marked
// as MEMBER_OF the parent struct.
- if (EncounteredME)
+ if (EncounteredME) {
ShouldBeMemberOf = true;
+          // Do not emit as a complex pointer if this is not actually an
+          // array-like expression.
+ if (FirstPointerInComplexData) {
+ QualType Ty = std::prev(I)
+ ->getAssociatedDeclaration()
+ ->getType()
+ .getNonReferenceType();
+ BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
+ FirstPointerInComplexData = false;
+ }
+ }
}
auto Next = std::next(I);
@@ -7579,9 +7691,18 @@ private:
// becomes the base address for the following components.
// A final array section, is one whose length can't be proved to be one.
+      // If the map item is non-contiguous, then we don't treat any array
+      // section as a final array section.
bool IsFinalArraySection =
+ !IsNonContiguous &&
isFinalArraySectionExpression(I->getAssociatedExpression());
+ // If we have a declaration for the mapping use that, otherwise use
+ // the base declaration of the map clause.
+ const ValueDecl *MapDecl = (I->getAssociatedDeclaration())
+ ? I->getAssociatedDeclaration()
+ : BaseDecl;
+
// Get information on whether the element is a pointer. Have to do a
// special treatment for array sections given that they are built-in
// types.
@@ -7597,7 +7718,10 @@ private:
.getCanonicalType()
->isAnyPointerType()) ||
I->getAssociatedExpression()->getType()->isAnyPointerType();
- bool IsNonDerefPointer = IsPointer && !UO && !BO;
+ bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
+
+ if (OASE)
+ ++DimSize;
if (Next == CE || IsNonDerefPointer || IsFinalArraySection) {
// If this is not the last component, we expect the pointer to be
@@ -7606,6 +7730,7 @@ private:
isa<MemberExpr>(Next->getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) ||
isa<UnaryOperator>(Next->getAssociatedExpression()) ||
isa<BinaryOperator>(Next->getAssociatedExpression())) &&
"Unexpected expression");
@@ -7651,9 +7776,9 @@ private:
// Emit data for non-overlapped data.
OpenMPOffloadMappingFlags Flags =
OMP_MAP_MEMBER_OF |
- getMapTypeBits(MapType, MapModifiers, IsImplicit,
+ getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
/*AddPtrFlag=*/false,
- /*AddIsTargetParamFlag=*/false);
+ /*AddIsTargetParamFlag=*/false, IsNonContiguous);
LB = BP;
llvm::Value *Size = nullptr;
// Do bitcopy of all non-overlapped structure elements.
@@ -7672,39 +7797,57 @@ private:
break;
}
}
- BasePointers.push_back(BP.getPointer());
- Pointers.push_back(LB.getPointer());
- Sizes.push_back(CGF.Builder.CreateIntCast(Size, CGF.Int64Ty,
- /*isSigned=*/true));
- Types.push_back(Flags);
+ assert(Size && "Failed to determine structure size");
+ CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
+ CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
+ Size, CGF.Int64Ty, /*isSigned=*/true));
+ CombinedInfo.Types.push_back(Flags);
+ CombinedInfo.Mappers.push_back(nullptr);
+ CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
+ : 1);
LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
}
- BasePointers.push_back(BP.getPointer());
- Pointers.push_back(LB.getPointer());
+ CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
+ CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
CGF.EmitCastToVoidPtr(
CGF.Builder.CreateConstGEP(HB, 1).getPointer()),
CGF.EmitCastToVoidPtr(LB.getPointer()));
- Sizes.push_back(
+ CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
- Types.push_back(Flags);
+ CombinedInfo.Types.push_back(Flags);
+ CombinedInfo.Mappers.push_back(nullptr);
+ CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
+ : 1);
break;
}
llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
- if (!IsMemberPointerOrAddr) {
- BasePointers.push_back(BP.getPointer());
- Pointers.push_back(LB.getPointer());
- Sizes.push_back(
+ if (!IsMemberPointerOrAddr ||
+ (Next == CE && MapType != OMPC_MAP_unknown)) {
+ CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
+ CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
+ CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
+ : 1);
+
+ // If Mapper is valid, the last component inherits the mapper.
+ bool HasMapper = Mapper && Next == CE;
+ CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
// We need to add a pointer flag for each map that comes from the
// same expression except for the first one. We also need to signal
// this map is the first one that relates with the current capture
// (there is a set of entries for each capture).
OpenMPOffloadMappingFlags Flags = getMapTypeBits(
- MapType, MapModifiers, IsImplicit,
- !IsExpressionFirstInfo || RequiresReference,
- IsCaptureFirstInfo && !RequiresReference);
+ MapType, MapModifiers, MotionModifiers, IsImplicit,
+ !IsExpressionFirstInfo || RequiresReference ||
+ FirstPointerInComplexData,
+ IsCaptureFirstInfo && !RequiresReference, IsNonContiguous);
if (!IsExpressionFirstInfo) {
// If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
@@ -7723,7 +7866,7 @@ private:
}
}
- Types.push_back(Flags);
+ CombinedInfo.Types.push_back(Flags);
}
// If we have encountered a member expression so far, keep track of the
@@ -7752,6 +7895,10 @@ private:
}
}
+ // Need to emit combined struct for array sections.
+ if (IsFinalArraySection || IsNonContiguous)
+ PartialStruct.IsArraySection = true;
+
// If we have a final array section, we are done with this expression.
if (IsFinalArraySection)
break;
@@ -7762,8 +7909,192 @@ private:
IsExpressionFirstInfo = false;
IsCaptureFirstInfo = false;
+ FirstPointerInComplexData = false;
+ } else if (FirstPointerInComplexData) {
+ QualType Ty = Components.rbegin()
+ ->getAssociatedDeclaration()
+ ->getType()
+ .getNonReferenceType();
+ BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
+ FirstPointerInComplexData = false;
}
}
+
+ if (!IsNonContiguous)
+ return;
+
+ const ASTContext &Context = CGF.getContext();
+
+    // To support strides in array sections, we need to initialize the first
+    // dimension size as 1, the first offset as 0, and the first count as 1.
+ MapValuesArrayTy CurOffsets = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 0)};
+ MapValuesArrayTy CurCounts = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
+ MapValuesArrayTy CurStrides;
+ MapValuesArrayTy DimSizes{llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
+ uint64_t ElementTypeSize;
+
+ // Collect Size information for each dimension and get the element size as
+ // the first Stride. For example, for `int arr[10][10]`, the DimSizes
+    // should be [10, 10] and the first stride is 4 bytes.
+ for (const OMPClauseMappableExprCommon::MappableComponent &Component :
+ Components) {
+ const Expr *AssocExpr = Component.getAssociatedExpression();
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
+
+ if (!OASE)
+ continue;
+
+ QualType Ty = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ auto *CAT = Context.getAsConstantArrayType(Ty);
+ auto *VAT = Context.getAsVariableArrayType(Ty);
+
+      // We need all the dimension sizes except for the last dimension.
+ assert((VAT || CAT || &Component == &*Components.begin()) &&
+ "Should be either ConstantArray or VariableArray if not the "
+ "first Component");
+
+ // Get element size if CurStrides is empty.
+ if (CurStrides.empty()) {
+ const Type *ElementType = nullptr;
+ if (CAT)
+ ElementType = CAT->getElementType().getTypePtr();
+ else if (VAT)
+ ElementType = VAT->getElementType().getTypePtr();
+ else
+ assert(&Component == &*Components.begin() &&
+ "Only expect pointer (non CAT or VAT) when this is the "
+ "first Component");
+ // If ElementType is null, then it means the base is a pointer
+ // (neither CAT nor VAT) and we'll attempt to get ElementType again
+        // for the next iteration.
+ if (ElementType) {
+          // For the case of having a pointer as the base, we need to remove
+          // one level of indirection.
+ if (&Component != &*Components.begin())
+ ElementType = ElementType->getPointeeOrArrayElementType();
+ ElementTypeSize =
+ Context.getTypeSizeInChars(ElementType).getQuantity();
+ CurStrides.push_back(
+ llvm::ConstantInt::get(CGF.Int64Ty, ElementTypeSize));
+ }
+ }
+      // Get the dimension value for every dimension except the last, since
+      // we don't need it.
+ if (DimSizes.size() < Components.size() - 1) {
+ if (CAT)
+ DimSizes.push_back(llvm::ConstantInt::get(
+ CGF.Int64Ty, CAT->getSize().getZExtValue()));
+ else if (VAT)
+ DimSizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty,
+ /*IsSigned=*/false));
+ }
+ }
+
+    // Skip the dummy dimension since we already have its information.
+ auto DI = DimSizes.begin() + 1;
+    // Running product of the dimension sizes.
+ llvm::Value *DimProd =
+ llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize);
+
+    // Collect info for non-contiguous maps. Notice that offset, count, and
+    // stride are only meaningful for array sections, so we insert a null for
+    // anything other than an array section.
+    // Also, the sizes of offset, count, and stride are not the same as those
+    // of pointers, base_pointers, sizes, or dims. Instead, they match the
+    // number of non-contiguous declarations in the target update to/from
+    // clause.
+ for (const OMPClauseMappableExprCommon::MappableComponent &Component :
+ Components) {
+ const Expr *AssocExpr = Component.getAssociatedExpression();
+
+ if (const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr)) {
+ llvm::Value *Offset = CGF.Builder.CreateIntCast(
+ CGF.EmitScalarExpr(AE->getIdx()), CGF.Int64Ty,
+ /*isSigned=*/false);
+ CurOffsets.push_back(Offset);
+ CurCounts.push_back(llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/1));
+ CurStrides.push_back(CurStrides.back());
+ continue;
+ }
+
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
+
+ if (!OASE)
+ continue;
+
+ // Offset
+ const Expr *OffsetExpr = OASE->getLowerBound();
+ llvm::Value *Offset = nullptr;
+ if (!OffsetExpr) {
+ // If offset is absent, then we just set it to zero.
+ Offset = llvm::ConstantInt::get(CGF.Int64Ty, 0);
+ } else {
+ Offset = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(OffsetExpr),
+ CGF.Int64Ty,
+ /*isSigned=*/false);
+ }
+ CurOffsets.push_back(Offset);
+
+ // Count
+ const Expr *CountExpr = OASE->getLength();
+ llvm::Value *Count = nullptr;
+ if (!CountExpr) {
+        // In Clang, once a higher dimension is an array section, we construct
+        // all the lower dimensions as array sections; however, for a case like
+        // arr[0:2][2], Clang constructs the inner dimension as an array
+        // section, but it is not actually in array-section form according to
+        // the spec.
+ if (!OASE->getColonLocFirst().isValid() &&
+ !OASE->getColonLocSecond().isValid()) {
+ Count = llvm::ConstantInt::get(CGF.Int64Ty, 1);
+ } else {
+ // OpenMP 5.0, 2.1.5 Array Sections, Description.
+ // When the length is absent it defaults to ⌈(size −
+ // lower-bound)/stride⌉, where size is the size of the array
+ // dimension.
+ const Expr *StrideExpr = OASE->getStride();
+ llvm::Value *Stride =
+ StrideExpr
+ ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
+ CGF.Int64Ty, /*isSigned=*/false)
+ : nullptr;
+ if (Stride)
+ Count = CGF.Builder.CreateUDiv(
+ CGF.Builder.CreateNUWSub(*DI, Offset), Stride);
+ else
+ Count = CGF.Builder.CreateNUWSub(*DI, Offset);
+ }
+ } else {
+ Count = CGF.EmitScalarExpr(CountExpr);
+ }
+ Count = CGF.Builder.CreateIntCast(Count, CGF.Int64Ty, /*isSigned=*/false);
+ CurCounts.push_back(Count);
+
+ // Stride_n' = Stride_n * (D_0 * D_1 ... * D_n-1) * Unit size
+ // Take `int arr[5][5][5]` and `arr[0:2:2][1:2:1][0:2:2]` as an example:
+ // Offset Count Stride
+ // D0 0 1 4 (int) <- dummy dimension
+ // D1 0 2 8 (2 * (1) * 4)
+ // D2 1 2 20 (1 * (1 * 5) * 4)
+ // D3 0 2 200 (2 * (1 * 5 * 5) * 4)
+ const Expr *StrideExpr = OASE->getStride();
+ llvm::Value *Stride =
+ StrideExpr
+ ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
+ CGF.Int64Ty, /*isSigned=*/false)
+ : nullptr;
+ DimProd = CGF.Builder.CreateNUWMul(DimProd, *(DI - 1));
+ if (Stride)
+ CurStrides.push_back(CGF.Builder.CreateNUWMul(DimProd, Stride));
+ else
+ CurStrides.push_back(DimProd);
+ if (DI != DimSizes.end())
+ ++DI;
+ }
+
+ CombinedInfo.NonContigInfo.Offsets.push_back(CurOffsets);
+ CombinedInfo.NonContigInfo.Counts.push_back(CurCounts);
+ CombinedInfo.NonContigInfo.Strides.push_back(CurStrides);
}
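// A minimal standalone sketch (not part of this patch) of the stride
// arithmetic above, assuming a row-major `int arr[5][5][5]` and the
// section `arr[0:2:2][1:2:1][0:2:2]`; all names are illustrative.
#include <cassert>
#include <cstdint>
static void strideSketch() {
  const uint64_t Unit = sizeof(int);    // element size: 4 bytes
  const uint64_t Dims[] = {1, 5, 5};    // dummy D0, then D1, D2
  const uint64_t Strides[] = {2, 1, 2}; // per-dimension section strides
  uint64_t DimProd = Unit;
  uint64_t Out[3];
  for (int I = 0; I < 3; ++I) {
    DimProd *= Dims[I];            // (D_0 * ... * D_(n-1)) * unit size
    Out[I] = Strides[I] * DimProd; // Stride_n' for D1, D2, D3
  }
  assert(Out[0] == 8 && Out[1] == 20 && Out[2] == 200);
}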
/// Return the adjusted map modifiers if the declaration a capture refers to
@@ -7896,7 +8227,7 @@ public:
// Extract device pointer clause information.
for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
for (auto L : C->component_lists())
- DevPointersMap[L.first].push_back(L.second);
+ DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
}
/// Constructor for the declare mapper directive.
@@ -7906,15 +8237,23 @@ public:
/// Generate code for the combined entry if we have a partially mapped struct
/// and take care of the mapping flags of the arguments corresponding to
/// individual struct members.
- void emitCombinedEntry(MapBaseValuesArrayTy &BasePointers,
- MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
- MapFlagsArrayTy &Types, MapFlagsArrayTy &CurTypes,
- const StructRangeInfoTy &PartialStruct) const {
+ void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
+ MapFlagsArrayTy &CurTypes,
+ const StructRangeInfoTy &PartialStruct,
+ const ValueDecl *VD = nullptr,
+ bool NotTargetParams = true) const {
+ if (CurTypes.size() == 1 &&
+ ((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) &&
+ !PartialStruct.IsArraySection)
+ return;
+ CombinedInfo.Exprs.push_back(VD);
// Base is the base of the struct
- BasePointers.push_back(PartialStruct.Base.getPointer());
+ CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
// Pointer is the address of the lowest element
llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
- Pointers.push_back(LB);
+ CombinedInfo.Pointers.push_back(LB);
+ // There should not be a mapper for a combined entry.
+ CombinedInfo.Mappers.push_back(nullptr);
// Size is (addr of {highest+1} element) - (addr of lowest element)
llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
@@ -7923,28 +8262,39 @@ public:
llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
/*isSigned=*/false);
- Sizes.push_back(Size);
- // Map type is always TARGET_PARAM
- Types.push_back(OMP_MAP_TARGET_PARAM);
- // Remove TARGET_PARAM flag from the first element
- (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
+ CombinedInfo.Sizes.push_back(Size);
+ // Map type is always TARGET_PARAM when generating info for captures.
+ CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
+ : OMP_MAP_TARGET_PARAM);
+ // If any element has the present modifier, then make sure the runtime
+ // doesn't attempt to allocate the struct.
+ if (CurTypes.end() !=
+ llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
+ return Type & OMP_MAP_PRESENT;
+ }))
+ CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
+ // Remove TARGET_PARAM flag from the first element if any.
+ if (!CurTypes.empty())
+ CurTypes.front() &= ~OMP_MAP_TARGET_PARAM;
// All other current entries will be MEMBER_OF the combined entry
// (except for PTR_AND_OBJ entries which do not have a placeholder value
// 0xFFFF in the MEMBER_OF field).
OpenMPOffloadMappingFlags MemberOfFlag =
- getMemberOfFlag(BasePointers.size() - 1);
+ getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
for (auto &M : CurTypes)
setCorrectMemberOfFlag(M, MemberOfFlag);
}
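// A hedged sketch of the MEMBER_OF encoding used by getMemberOfFlag and
// setCorrectMemberOfFlag above: the 1-based index of the combined (parent)
// entry is stored in the upper 16 bits of the 64-bit map-type word. The
// bit offset of 48 is an assumption based on OMP_MAP_MEMBER_OF being
// 0xffff000000000000.
#include <cstdint>
static uint64_t memberOfFlagSketch(unsigned ParentIdx) {
  const unsigned MemberOfOffset = 48; // assumed position of MEMBER_OF bits
  return (uint64_t)(ParentIdx + 1) << MemberOfOffset;
}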
- /// Generate all the base pointers, section pointers, sizes and map
- /// types for the extracted mappable expressions. Also, for each item that
- /// relates with a device pointer, a pair of the relevant declaration and
- /// index where it occurs is appended to the device pointers info array.
- void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
- MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
- MapFlagsArrayTy &Types) const {
+ /// Generate all the base pointers, section pointers, sizes, map types, and
+ /// mappers for the extracted mappable expressions (all included in \a
+ /// CombinedInfo). Also, for each item that relates to a device pointer, a
+ /// pair of the relevant declaration and index where it occurs is appended to
+ /// the device pointers info array.
+ void generateAllInfo(
+ MapCombinedInfoTy &CombinedInfo,
+ const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
// We have to process the component lists that relate to the same
// declaration in a single chunk so that we can generate the map flags
// correctly. Therefore, we organize all lists in a map.
@@ -7953,36 +8303,56 @@ public:
// Helper function to fill the information map for the different supported
// clauses.
auto &&InfoGen =
- [&Info](const ValueDecl *D,
- OMPClauseMappableExprCommon::MappableExprComponentListRef L,
- OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- bool ReturnDevicePointer, bool IsImplicit,
- bool ForDeviceAddr = false) {
+ [&Info, &SkipVarSet](
+ const ValueDecl *D,
+ OMPClauseMappableExprCommon::MappableExprComponentListRef L,
+ OpenMPMapClauseKind MapType,
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper,
+ const Expr *VarRef = nullptr, bool ForDeviceAddr = false) {
const ValueDecl *VD =
D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- Info[VD].emplace_back(L, MapType, MapModifiers, ReturnDevicePointer,
- IsImplicit, ForDeviceAddr);
+ if (SkipVarSet.count(VD))
+ return;
+ Info[VD].emplace_back(L, MapType, MapModifiers, MotionModifiers,
+ ReturnDevicePointer, IsImplicit, Mapper, VarRef,
+ ForDeviceAddr);
};
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
- for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>())
+ for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
+ const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifiers(),
- /*ReturnDevicePointer=*/false, C->isImplicit());
+ // The expression is not correct if the mapping is implicit.
+ const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
+ InfoGen(std::get<0>(L), std::get<1>(L), C->getMapType(),
+ C->getMapTypeModifiers(), llvm::None,
+ /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
+ E);
+ ++EI;
}
- for (const auto *C : CurExecDir->getClausesOfKind<OMPToClause>())
+ }
+ for (const auto *C : CurExecDir->getClausesOfKind<OMPToClause>()) {
+ const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- InfoGen(L.first, L.second, OMPC_MAP_to, llvm::None,
- /*ReturnDevicePointer=*/false, C->isImplicit());
+ InfoGen(std::get<0>(L), std::get<1>(L), OMPC_MAP_to, llvm::None,
+ C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
+ C->isImplicit(), std::get<2>(L), *EI);
+ ++EI;
}
- for (const auto *C : CurExecDir->getClausesOfKind<OMPFromClause>())
+ }
+ for (const auto *C : CurExecDir->getClausesOfKind<OMPFromClause>()) {
+ const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- InfoGen(L.first, L.second, OMPC_MAP_from, llvm::None,
- /*ReturnDevicePointer=*/false, C->isImplicit());
+ InfoGen(std::get<0>(L), std::get<1>(L), OMPC_MAP_from, llvm::None,
+ C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
+ C->isImplicit(), std::get<2>(L), *EI);
+ ++EI;
}
+ }
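// The kind of source these three loops consume; a hedged example assuming
// OpenMP 5.x syntax, where `present` is a motion modifier reported by
// getMotionModifiers() and `b[0:n:2]` is a strided, hence non-contiguous,
// array section.
static void clauseSketch(int *a, int *b, int n) {
#pragma omp target update to(present : a[0 : n]) from(b[0 : n : 2])
}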
// Look at the use_device_ptr clause information and mark the existing map
// entries as such. If there is no map information for an entry in the
@@ -7992,14 +8362,18 @@ public:
// emission of that entry until the whole struct has been processed.
llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
DeferredInfo;
+ MapCombinedInfoTy UseDevicePtrCombinedInfo;
for (const auto *C :
CurExecDir->getClausesOfKind<OMPUseDevicePtrClause>()) {
for (const auto L : C->component_lists()) {
- assert(!L.second.empty() && "Not expecting empty list of components!");
- const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
+ std::get<1>(L);
+ assert(!Components.empty() &&
+ "Not expecting empty list of components!");
+ const ValueDecl *VD = Components.back().getAssociatedDeclaration();
VD = cast<ValueDecl>(VD->getCanonicalDecl());
- const Expr *IE = L.second.back().getAssociatedExpression();
+ const Expr *IE = Components.back().getAssociatedExpression();
// If the first component is a member expression, we have to look into
// 'this', which maps to null in the map of map information. Otherwise
// look directly for the information.
@@ -8008,15 +8382,27 @@ public:
// We potentially have map information for this declaration already.
// Look for the first set of components that refer to it.
if (It != Info.end()) {
- auto CI = std::find_if(
- It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
- return MI.Components.back().getAssociatedDeclaration() == VD;
- });
+ auto *CI = llvm::find_if(It->second, [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
// If we found a map entry, signal that the pointer has to be returned
// and move on to the next declaration.
+ // Exclude cases where the base pointer is mapped as array subscript,
+ // array section or array shaping. The base address is passed as a
+ // pointer to base in this case and cannot be used as a base for
+ // use_device_ptr list item.
if (CI != It->second.end()) {
- CI->ReturnDevicePointer = true;
- continue;
+ auto PrevCI = std::next(CI->Components.rbegin());
+ const auto *VarD = dyn_cast<VarDecl>(VD);
+ if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
+ isa<MemberExpr>(IE) ||
+ !VD->getType().getNonReferenceType()->isPointerType() ||
+ PrevCI == CI->Components.rend() ||
+ isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
+ VarD->hasLocalStorage()) {
+ CI->ReturnDevicePointer = true;
+ continue;
+ }
}
}
@@ -8031,16 +8417,19 @@ public:
// Nonetheless, generateInfoForComponentList must be called to take
// the pointer into account for the calculation of the range of the
// partial struct.
- InfoGen(nullptr, L.second, OMPC_MAP_unknown, llvm::None,
- /*ReturnDevicePointer=*/false, C->isImplicit());
+ InfoGen(nullptr, Components, OMPC_MAP_unknown, llvm::None, llvm::None,
+ /*ReturnDevicePointer=*/false, C->isImplicit(), nullptr);
DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
} else {
llvm::Value *Ptr =
CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
- BasePointers.emplace_back(Ptr, VD);
- Pointers.push_back(Ptr);
- Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
- Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
+ UseDevicePtrCombinedInfo.Exprs.push_back(VD);
+ UseDevicePtrCombinedInfo.BasePointers.emplace_back(Ptr, VD);
+ UseDevicePtrCombinedInfo.Pointers.push_back(Ptr);
+ UseDevicePtrCombinedInfo.Sizes.push_back(
+ llvm::Constant::getNullValue(CGF.Int64Ty));
+ UseDevicePtrCombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
+ UseDevicePtrCombinedInfo.Mappers.push_back(nullptr);
}
}
}
@@ -8055,12 +8444,13 @@ public:
for (const auto *C :
CurExecDir->getClausesOfKind<OMPUseDeviceAddrClause>()) {
for (const auto L : C->component_lists()) {
- assert(!L.second.empty() && "Not expecting empty list of components!");
- const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
+ assert(!std::get<1>(L).empty() &&
+ "Not expecting empty list of components!");
+ const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
if (!Processed.insert(VD).second)
continue;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
- const Expr *IE = L.second.back().getAssociatedExpression();
+ const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
// If the first component is a member expression, we have to look into
// 'this', which maps to null in the map of map information. Otherwise
// look directly for the information.
@@ -8091,9 +8481,9 @@ public:
// Nonetheless, generateInfoForComponentList must be called to take
// the pointer into account for the calculation of the range of the
// partial struct.
- InfoGen(nullptr, L.second, OMPC_MAP_unknown, llvm::None,
- /*ReturnDevicePointer=*/false, C->isImplicit(),
- /*ForDeviceAddr=*/true);
+ InfoGen(nullptr, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
+ llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
+ nullptr, nullptr, /*ForDeviceAddr=*/true);
DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
} else {
llvm::Value *Ptr;
@@ -8101,24 +8491,23 @@ public:
Ptr = CGF.EmitLValue(IE).getPointer(CGF);
else
Ptr = CGF.EmitScalarExpr(IE);
- BasePointers.emplace_back(Ptr, VD);
- Pointers.push_back(Ptr);
- Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
- Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
+ CombinedInfo.Exprs.push_back(VD);
+ CombinedInfo.BasePointers.emplace_back(Ptr, VD);
+ CombinedInfo.Pointers.push_back(Ptr);
+ CombinedInfo.Sizes.push_back(
+ llvm::Constant::getNullValue(CGF.Int64Ty));
+ CombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
+ CombinedInfo.Mappers.push_back(nullptr);
}
}
}
for (const auto &M : Info) {
- // We need to know when we generate information for the first component
- // associated with a capture, because the mapping flags depend on it.
- bool IsFirstComponentList = true;
+ // Underlying variable declaration used in the map clause.
+ const ValueDecl *VD = std::get<0>(M);
- // Temporary versions of arrays
- MapBaseValuesArrayTy CurBasePointers;
- MapValuesArrayTy CurPointers;
- MapValuesArrayTy CurSizes;
- MapFlagsArrayTy CurTypes;
+ // Temporary generated information.
+ MapCombinedInfoTy CurInfo;
StructRangeInfoTy PartialStruct;
for (const MapInfo &L : M.second) {
@@ -8126,16 +8515,18 @@ public:
"Not expecting declaration with no component lists.");
// Remember the current base pointer index.
- unsigned CurrentBasePointersIdx = CurBasePointers.size();
+ unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
+ CurInfo.NonContigInfo.IsNonContiguous =
+ L.Components.back().isNonContiguous();
generateInfoForComponentList(
- L.MapType, L.MapModifiers, L.Components, CurBasePointers,
- CurPointers, CurSizes, CurTypes, PartialStruct,
- IsFirstComponentList, L.IsImplicit, L.ForDeviceAddr);
+ L.MapType, L.MapModifiers, L.MotionModifiers, L.Components, CurInfo,
+ PartialStruct, /*IsFirstComponentList=*/false, L.IsImplicit,
+ L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
// If this entry relates with a device pointer, set the relevant
// declaration and add the 'return pointer' flag.
if (L.ReturnDevicePointer) {
- assert(CurBasePointers.size() > CurrentBasePointersIdx &&
+ assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
"Unexpected number of mapped base pointers.");
const ValueDecl *RelevantVD =
@@ -8143,10 +8534,10 @@ public:
assert(RelevantVD &&
"No relevant declaration related with device pointer??");
- CurBasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
- CurTypes[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
+ CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
+ RelevantVD);
+ CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
}
- IsFirstComponentList = false;
}
// Append any pending zero-length pointers which are struct members and
@@ -8165,7 +8556,7 @@ public:
// Entry is RETURN_PARAM. Also, set the placeholder value
// MEMBER_OF=FFFF so that the entry is later updated with the
// correct value of MEMBER_OF.
- CurTypes.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
+ CurInfo.Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
} else {
BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
@@ -8173,35 +8564,34 @@ public:
// Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
// value MEMBER_OF=FFFF so that the entry is later updated with the
// correct value of MEMBER_OF.
- CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
- OMP_MAP_MEMBER_OF);
+ CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
+ OMP_MAP_MEMBER_OF);
}
- CurBasePointers.emplace_back(BasePtr, L.VD);
- CurPointers.push_back(Ptr);
- CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.Int64Ty));
+ CurInfo.Exprs.push_back(L.VD);
+ CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
+ CurInfo.Pointers.push_back(Ptr);
+ CurInfo.Sizes.push_back(
+ llvm::Constant::getNullValue(this->CGF.Int64Ty));
+ CurInfo.Mappers.push_back(nullptr);
}
}
// If there is an entry in PartialStruct it means we have a struct with
// individual members mapped. Emit an extra combined entry.
if (PartialStruct.Base.isValid())
- emitCombinedEntry(BasePointers, Pointers, Sizes, Types, CurTypes,
- PartialStruct);
+ emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
// We need to append the results of this capture to what we already have.
- BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
- Pointers.append(CurPointers.begin(), CurPointers.end());
- Sizes.append(CurSizes.begin(), CurSizes.end());
- Types.append(CurTypes.begin(), CurTypes.end());
+ CombinedInfo.append(CurInfo);
}
+ // Append data for use_device_ptr clauses.
+ CombinedInfo.append(UseDevicePtrCombinedInfo);
}
- /// Generate all the base pointers, section pointers, sizes and map types for
- /// the extracted map clauses of user-defined mapper.
- void generateAllInfoForMapper(MapBaseValuesArrayTy &BasePointers,
- MapValuesArrayTy &Pointers,
- MapValuesArrayTy &Sizes,
- MapFlagsArrayTy &Types) const {
+ /// Generate all the base pointers, section pointers, sizes, map types, and
+ /// mappers for the extracted map clauses of a user-defined mapper (all
+ /// included in \a CombinedInfo).
+ void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
"Expect a declare mapper directive");
const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
@@ -8210,25 +8600,22 @@ public:
// correctly. Therefore, we organize all lists in a map.
llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
- // Helper function to fill the information map for the different supported
- // clauses.
- auto &&InfoGen = [&Info](
- const ValueDecl *D,
- OMPClauseMappableExprCommon::MappableExprComponentListRef L,
- OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- bool ReturnDevicePointer, bool IsImplicit) {
- const ValueDecl *VD =
- D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- Info[VD].emplace_back(L, MapType, MapModifiers, ReturnDevicePointer,
- IsImplicit);
- };
-
+ // Fill the information map for map clauses.
for (const auto *C : CurMapperDir->clauselists()) {
const auto *MC = cast<OMPMapClause>(C);
+ const auto *EI = MC->getVarRefs().begin();
for (const auto L : MC->component_lists()) {
- InfoGen(L.first, L.second, MC->getMapType(), MC->getMapTypeModifiers(),
- /*ReturnDevicePointer=*/false, MC->isImplicit());
+ // The expression is not correct if the mapping is implicit.
+ const Expr *E = (MC->getMapLoc().isValid()) ? *EI : nullptr;
+ const ValueDecl *VD =
+ std::get<0>(L) ? cast<ValueDecl>(std::get<0>(L)->getCanonicalDecl())
+ : nullptr;
+ // Get the corresponding user-defined mapper.
+ Info[VD].emplace_back(std::get<1>(L), MC->getMapType(),
+ MC->getMapTypeModifiers(), llvm::None,
+ /*ReturnDevicePointer=*/false, MC->isImplicit(),
+ std::get<2>(L), E);
+ ++EI;
}
}
@@ -8237,42 +8624,38 @@ public:
// associated with a capture, because the mapping flags depend on it.
bool IsFirstComponentList = true;
- // Temporary versions of arrays
- MapBaseValuesArrayTy CurBasePointers;
- MapValuesArrayTy CurPointers;
- MapValuesArrayTy CurSizes;
- MapFlagsArrayTy CurTypes;
+ // Underlying variable declaration used in the map clause.
+ const ValueDecl *VD = std::get<0>(M);
+
+ // Temporary generated information.
+ MapCombinedInfoTy CurInfo;
StructRangeInfoTy PartialStruct;
for (const MapInfo &L : M.second) {
assert(!L.Components.empty() &&
"Not expecting declaration with no component lists.");
generateInfoForComponentList(
- L.MapType, L.MapModifiers, L.Components, CurBasePointers,
- CurPointers, CurSizes, CurTypes, PartialStruct,
- IsFirstComponentList, L.IsImplicit, L.ForDeviceAddr);
+ L.MapType, L.MapModifiers, L.MotionModifiers, L.Components, CurInfo,
+ PartialStruct, IsFirstComponentList, L.IsImplicit, L.Mapper,
+ L.ForDeviceAddr, VD, L.VarRef);
IsFirstComponentList = false;
}
// If there is an entry in PartialStruct it means we have a struct with
// individual members mapped. Emit an extra combined entry.
- if (PartialStruct.Base.isValid())
- emitCombinedEntry(BasePointers, Pointers, Sizes, Types, CurTypes,
- PartialStruct);
+ if (PartialStruct.Base.isValid()) {
+ CurInfo.NonContigInfo.Dims.push_back(0);
+ emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
+ }
// We need to append the results of this capture to what we already have.
- BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
- Pointers.append(CurPointers.begin(), CurPointers.end());
- Sizes.append(CurSizes.begin(), CurSizes.end());
- Types.append(CurTypes.begin(), CurTypes.end());
+ CombinedInfo.append(CurInfo);
}
}
/// Emit capture info for lambdas for variables captured by reference.
void generateInfoForLambdaCaptures(
- const ValueDecl *VD, llvm::Value *Arg, MapBaseValuesArrayTy &BasePointers,
- MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
- MapFlagsArrayTy &Types,
+ const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
const auto *RD = VD->getType()
.getCanonicalType()
@@ -8292,13 +8675,15 @@ public:
LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
VDLVal.getPointer(CGF));
- BasePointers.push_back(ThisLVal.getPointer(CGF));
- Pointers.push_back(ThisLValVal.getPointer(CGF));
- Sizes.push_back(
+ CombinedInfo.Exprs.push_back(VD);
+ CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
+ CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
+ CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
CGF.Int64Ty, /*isSigned=*/true));
- Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
- OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ CombinedInfo.Mappers.push_back(nullptr);
}
for (const LambdaCapture &LC : RD->captures()) {
if (!LC.capturesVariable())
@@ -8313,9 +8698,10 @@ public:
LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
VDLVal.getPointer(CGF));
- BasePointers.push_back(VarLVal.getPointer(CGF));
- Pointers.push_back(VarLValVal.getPointer(CGF));
- Sizes.push_back(CGF.Builder.CreateIntCast(
+ CombinedInfo.Exprs.push_back(VD);
+ CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
+ CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
+ CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(
VD->getType().getCanonicalType().getNonReferenceType()),
CGF.Int64Ty, /*isSigned=*/true));
@@ -8323,12 +8709,14 @@ public:
RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
VDLVal.getPointer(CGF));
- BasePointers.push_back(VarLVal.getPointer(CGF));
- Pointers.push_back(VarRVal.getScalarVal());
- Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
+ CombinedInfo.Exprs.push_back(VD);
+ CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
+ CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
+ CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
}
- Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
- OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ CombinedInfo.Mappers.push_back(nullptr);
}
}
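// The construct handled above; a hedged sketch assuming implicit lambda
// mapping: each by-reference capture becomes a PTR_AND_OBJ | MEMBER_OF |
// IMPLICIT entry anchored at the lambda object itself.
static void lambdaSketch(int &X) {
  auto Fn = [&X]() { ++X; };
#pragma omp target
  Fn(); // captures of Fn are mapped by generateInfoForLambdaCaptures()
}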
@@ -8361,13 +8749,10 @@ public:
}
}
- /// Generate the base pointers, section pointers, sizes and map types
- /// associated to a given capture.
+ /// Generate the base pointers, section pointers, sizes, map types, and
+ /// mappers associated to a given capture (all included in \a CombinedInfo).
void generateInfoForCapture(const CapturedStmt::Capture *Cap,
- llvm::Value *Arg,
- MapBaseValuesArrayTy &BasePointers,
- MapValuesArrayTy &Pointers,
- MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
+ llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
StructRangeInfoTy &PartialStruct) const {
assert(!Cap->capturesVariableArrayType() &&
"Not expecting to generate map info for a variable array type!");
@@ -8381,31 +8766,42 @@ public:
// pass the pointer by value. If it is a reference to a declaration, we just
// pass its value.
if (DevPointersMap.count(VD)) {
- BasePointers.emplace_back(Arg, VD);
- Pointers.push_back(Arg);
- Sizes.push_back(
- CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
- CGF.Int64Ty, /*isSigned=*/true));
- Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
+ CombinedInfo.Exprs.push_back(VD);
+ CombinedInfo.BasePointers.emplace_back(Arg, VD);
+ CombinedInfo.Pointers.push_back(Arg);
+ CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
+ /*isSigned=*/true));
+ CombinedInfo.Types.push_back(
+ (Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) |
+ OMP_MAP_TARGET_PARAM);
+ CombinedInfo.Mappers.push_back(nullptr);
return;
}
using MapData =
std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
- OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool>;
+ OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool,
+ const ValueDecl *, const Expr *>;
SmallVector<MapData, 4> DeclComponentLists;
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
+ const auto *EI = C->getVarRefs().begin();
for (const auto L : C->decl_component_lists(VD)) {
- assert(L.first == VD &&
- "We got information for the wrong declaration??");
- assert(!L.second.empty() &&
+ const ValueDecl *VDecl, *Mapper;
+ // The expression is not correct if the mapping is implicit.
+ const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
+ std::tie(VDecl, Components, Mapper) = L;
+ assert(VDecl == VD && "We got information for the wrong declaration??");
+ assert(!Components.empty() &&
"Not expecting declaration with no component lists.");
- DeclComponentLists.emplace_back(L.second, C->getMapType(),
+ DeclComponentLists.emplace_back(Components, C->getMapType(),
C->getMapTypeModifiers(),
- C->isImplicit());
+ C->isImplicit(), Mapper, E);
+ ++EI;
}
}
@@ -8422,11 +8818,15 @@ public:
OpenMPMapClauseKind MapType;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool IsImplicit;
- std::tie(Components, MapType, MapModifiers, IsImplicit) = L;
+ const ValueDecl *Mapper;
+ const Expr *VarRef;
+ std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
+ L;
++Count;
for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
- std::tie(Components1, MapType, MapModifiers, IsImplicit) = L1;
+ std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper,
+ VarRef) = L1;
auto CI = Components.rbegin();
auto CE = Components.rend();
auto SI = Components1.rbegin();
@@ -8512,14 +8912,17 @@ public:
OpenMPMapClauseKind MapType;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool IsImplicit;
- std::tie(Components, MapType, MapModifiers, IsImplicit) = L;
+ const ValueDecl *Mapper;
+ const Expr *VarRef;
+ std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
+ L;
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedComponents = Pair.getSecond();
bool IsFirstComponentList = true;
generateInfoForComponentList(
- MapType, MapModifiers, Components, BasePointers, Pointers, Sizes,
- Types, PartialStruct, IsFirstComponentList, IsImplicit,
- /*ForDeviceAddr=*/false, OverlappedComponents);
+ MapType, MapModifiers, llvm::None, Components, CombinedInfo,
+ PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
+ /*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents);
}
// Go through other elements without overlapped elements.
bool IsFirstComponentList = OverlappedData.empty();
@@ -8528,86 +8931,54 @@ public:
OpenMPMapClauseKind MapType;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool IsImplicit;
- std::tie(Components, MapType, MapModifiers, IsImplicit) = L;
+ const ValueDecl *Mapper;
+ const Expr *VarRef;
+ std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
+ L;
auto It = OverlappedData.find(&L);
if (It == OverlappedData.end())
- generateInfoForComponentList(MapType, MapModifiers, Components,
- BasePointers, Pointers, Sizes, Types,
- PartialStruct, IsFirstComponentList,
- IsImplicit);
+ generateInfoForComponentList(MapType, MapModifiers, llvm::None,
+ Components, CombinedInfo, PartialStruct,
+ IsFirstComponentList, IsImplicit, Mapper,
+ /*ForDeviceAddr=*/false, VD, VarRef);
IsFirstComponentList = false;
}
}
- /// Generate the base pointers, section pointers, sizes and map types
- /// associated with the declare target link variables.
- void generateInfoForDeclareTargetLink(MapBaseValuesArrayTy &BasePointers,
- MapValuesArrayTy &Pointers,
- MapValuesArrayTy &Sizes,
- MapFlagsArrayTy &Types) const {
- assert(CurDir.is<const OMPExecutableDirective *>() &&
- "Expect a executable directive");
- const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
- // Map other list items in the map clause which are not captured variables
- // but "declare target link" global variables.
- for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
- for (const auto L : C->component_lists()) {
- if (!L.first)
- continue;
- const auto *VD = dyn_cast<VarDecl>(L.first);
- if (!VD)
- continue;
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
- !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
- continue;
- StructRangeInfoTy PartialStruct;
- generateInfoForComponentList(
- C->getMapType(), C->getMapTypeModifiers(), L.second, BasePointers,
- Pointers, Sizes, Types, PartialStruct,
- /*IsFirstComponentList=*/true, C->isImplicit());
- assert(!PartialStruct.Base.isValid() &&
- "No partial structs for declare target link expected.");
- }
- }
- }
-
/// Generate the default map information for a given capture \a CI,
/// record field declaration \a RI and captured value \a CV.
void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
const FieldDecl &RI, llvm::Value *CV,
- MapBaseValuesArrayTy &CurBasePointers,
- MapValuesArrayTy &CurPointers,
- MapValuesArrayTy &CurSizes,
- MapFlagsArrayTy &CurMapTypes) const {
+ MapCombinedInfoTy &CombinedInfo) const {
bool IsImplicit = true;
// Do the default mapping.
if (CI.capturesThis()) {
- CurBasePointers.push_back(CV);
- CurPointers.push_back(CV);
+ CombinedInfo.Exprs.push_back(nullptr);
+ CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.Pointers.push_back(CV);
const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
- CurSizes.push_back(
+ CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
CGF.Int64Ty, /*isSigned=*/true));
// Default map type.
- CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
+ CombinedInfo.Types.push_back(OMP_MAP_TO | OMP_MAP_FROM);
} else if (CI.capturesVariableByCopy()) {
- CurBasePointers.push_back(CV);
- CurPointers.push_back(CV);
+ const VarDecl *VD = CI.getCapturedVar();
+ CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
+ CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.Pointers.push_back(CV);
if (!RI.getType()->isAnyPointerType()) {
// We have to signal to the runtime captures passed by value that are
// not pointers.
- CurMapTypes.push_back(OMP_MAP_LITERAL);
- CurSizes.push_back(CGF.Builder.CreateIntCast(
+ CombinedInfo.Types.push_back(OMP_MAP_LITERAL);
+ CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
} else {
// Pointers are implicitly mapped with a zero size and no flags
// (other than first map that is added for all implicit maps).
- CurMapTypes.push_back(OMP_MAP_NONE);
- CurSizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
+ CombinedInfo.Types.push_back(OMP_MAP_NONE);
+ CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
}
- const VarDecl *VD = CI.getCapturedVar();
auto I = FirstPrivateDecls.find(VD);
if (I != FirstPrivateDecls.end())
IsImplicit = I->getSecond();
@@ -8615,12 +8986,12 @@ public:
assert(CI.capturesVariable() && "Expected captured reference.");
const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
QualType ElementType = PtrTy->getPointeeType();
- CurSizes.push_back(CGF.Builder.CreateIntCast(
+ CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(ElementType), CGF.Int64Ty, /*isSigned=*/true));
// The default map type for a scalar/complex type is 'to' because by
// default the value doesn't have to be retrieved. For an aggregate
// type, the default is 'tofrom'.
- CurMapTypes.push_back(getMapModifiersForPrivateClauses(CI));
+ CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
const VarDecl *VD = CI.getCapturedVar();
auto I = FirstPrivateDecls.find(VD);
if (I != FirstPrivateDecls.end() &&
@@ -8631,56 +9002,157 @@ public:
CGF.Builder.CreateMemCpy(
CGF.MakeNaturalAlignAddrLValue(Addr, ElementType).getAddress(CGF),
Address(CV, CGF.getContext().getTypeAlignInChars(ElementType)),
- CurSizes.back(), /*IsVolatile=*/false);
+ CombinedInfo.Sizes.back(), /*IsVolatile=*/false);
// Use new global variable as the base pointers.
- CurBasePointers.push_back(Addr);
- CurPointers.push_back(Addr);
+ CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
+ CombinedInfo.BasePointers.push_back(Addr);
+ CombinedInfo.Pointers.push_back(Addr);
} else {
- CurBasePointers.push_back(CV);
+ CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
+ CombinedInfo.BasePointers.push_back(CV);
if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
CV, ElementType, CGF.getContext().getDeclAlign(VD),
AlignmentSource::Decl));
- CurPointers.push_back(PtrAddr.getPointer());
+ CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
} else {
- CurPointers.push_back(CV);
+ CombinedInfo.Pointers.push_back(CV);
}
}
if (I != FirstPrivateDecls.end())
IsImplicit = I->getSecond();
}
// Every default map produces a single argument which is a target parameter.
- CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
+ CombinedInfo.Types.back() |= OMP_MAP_TARGET_PARAM;
// Add flag stating this is an implicit map.
if (IsImplicit)
- CurMapTypes.back() |= OMP_MAP_IMPLICIT;
+ CombinedInfo.Types.back() |= OMP_MAP_IMPLICIT;
+
+ // No user-defined mapper for default mapping.
+ CombinedInfo.Mappers.push_back(nullptr);
}
};
} // anonymous namespace
+static void emitNonContiguousDescriptor(
+ CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
+ CGOpenMPRuntime::TargetDataInfo &Info) {
+ CodeGenModule &CGM = CGF.CGM;
+ MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
+ &NonContigInfo = CombinedInfo.NonContigInfo;
+
+ // Build an array of struct descriptor_dim and then assign it to
+ // offload_args.
+ //
+ // struct descriptor_dim {
+ // uint64_t offset;
+ // uint64_t count;
+ // uint64_t stride;
+ // };
+ ASTContext &C = CGF.getContext();
+ QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
+ RecordDecl *RD;
+ RD = C.buildImplicitRecord("descriptor_dim");
+ RD->startDefinition();
+ addFieldToRecordDecl(C, RD, Int64Ty);
+ addFieldToRecordDecl(C, RD, Int64Ty);
+ addFieldToRecordDecl(C, RD, Int64Ty);
+ RD->completeDefinition();
+ QualType DimTy = C.getRecordType(RD);
+
+ enum { OffsetFD = 0, CountFD, StrideFD };
+ // We need two index variable here since the size of "Dims" is the same as the
+ // size of Components, however, the size of offset, count, and stride is equal
+ // to the size of base declaration that is non-contiguous.
+ for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
+ // Skip emitting IR if the dimension size is 1, since such a dimension
+ // cannot be non-contiguous.
+ if (NonContigInfo.Dims[I] == 1)
+ continue;
+ llvm::APInt Size(/*numBits=*/32, NonContigInfo.Dims[I]);
+ QualType ArrayTy =
+ C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
+ Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
+ for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
+ unsigned RevIdx = EE - II - 1;
+ LValue DimsLVal = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
+ // Offset
+ LValue OffsetLVal = CGF.EmitLValueForField(
+ DimsLVal, *std::next(RD->field_begin(), OffsetFD));
+ CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
+ // Count
+ LValue CountLVal = CGF.EmitLValueForField(
+ DimsLVal, *std::next(RD->field_begin(), CountFD));
+ CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
+ // Stride
+ LValue StrideLVal = CGF.EmitLValueForField(
+ DimsLVal, *std::next(RD->field_begin(), StrideFD));
+ CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
+ }
+ // args[I] = &dims
+ Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ DimsAddr, CGM.Int8PtrTy);
+ llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.PointersArray, 0, I);
+ Address PAddr(P, CGF.getPointerAlign());
+ CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
+ ++L;
+ }
+}
+
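// What the runtime receives for a non-contiguous entry; a hedged sketch:
// the pointer slot args[I] is redirected to an array of descriptor_dim
// (one per dimension, outermost first, hence RevIdx above), and the
// matching size slot carries the dimension count rather than a byte size.
#include <cstdint>
struct DescriptorDimSketch { // mirrors the implicit "descriptor_dim" record
  uint64_t Offset; // elements skipped in this dimension
  uint64_t Count;  // elements transferred in this dimension
  uint64_t Stride; // bytes between consecutive transferred elements
};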
+/// Emit a string constant containing the names of the values mapped to the
+/// offloading runtime library.
+llvm::Constant *
+emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
+ MappableExprsHandler::MappingExprInfo &MapExprs) {
+ llvm::Constant *SrcLocStr;
+ if (!MapExprs.getMapDecl()) {
+ SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
+ } else {
+ std::string ExprName = "";
+ if (MapExprs.getMapExpr()) {
+ PrintingPolicy P(CGF.getContext().getLangOpts());
+ llvm::raw_string_ostream OS(ExprName);
+ MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
+ OS.flush();
+ } else {
+ ExprName = MapExprs.getMapDecl()->getNameAsString();
+ }
+
+ SourceLocation Loc = MapExprs.getMapDecl()->getLocation();
+ PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
+ const char *FileName = PLoc.getFilename();
+ unsigned Line = PLoc.getLine();
+ unsigned Column = PLoc.getColumn();
+ SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FileName, ExprName.c_str(),
+ Line, Column);
+ }
+
+ return SrcLocStr;
+}
+
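// A hedged sketch of the name string this produces, assuming the
// OpenMPIRBuilder convention of semicolon-separated location fields
// (";file;name;line;col;;"); the exact field order is an assumption. For
// `map(s.p[0:4])` declared at t.c:3:12 it would look roughly like:
static const char *MapNameSketch = ";t.c;s.p[0:4];3;12;;"; // illustrative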
/// Emit the arrays used to pass the captures and map information to the
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
-static void
-emitOffloadingArrays(CodeGenFunction &CGF,
- MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
- MappableExprsHandler::MapValuesArrayTy &Pointers,
- MappableExprsHandler::MapValuesArrayTy &Sizes,
- MappableExprsHandler::MapFlagsArrayTy &MapTypes,
- CGOpenMPRuntime::TargetDataInfo &Info) {
+static void emitOffloadingArrays(
+ CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
+ CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
+ bool IsNonContiguous = false) {
CodeGenModule &CGM = CGF.CGM;
ASTContext &Ctx = CGF.getContext();
// Reset the array information.
Info.clearArrayInfo();
- Info.NumberOfPtrs = BasePointers.size();
+ Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
if (Info.NumberOfPtrs) {
// Detect if we have any capture size requiring runtime evaluation of the
// size so that a constant array could be eventually used.
bool hasRuntimeEvaluationCaptureSize = false;
- for (llvm::Value *S : Sizes)
+ for (llvm::Value *S : CombinedInfo.Sizes)
if (!isa<llvm::Constant>(S)) {
hasRuntimeEvaluationCaptureSize = true;
break;
@@ -8695,6 +9167,9 @@ emitOffloadingArrays(CodeGenFunction &CGF,
CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
Info.PointersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
+ Address MappersArray =
+ CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
+ Info.MappersArray = MappersArray.getPointer();
// If we don't have any VLA types or other types that require runtime
// evaluation, we can use a constant array for the map sizes, otherwise we
@@ -8711,8 +9186,15 @@ emitOffloadingArrays(CodeGenFunction &CGF,
// We expect all the sizes to be constant, so we collect them to create
// a constant array.
SmallVector<llvm::Constant *, 16> ConstSizes;
- for (llvm::Value *S : Sizes)
- ConstSizes.push_back(cast<llvm::Constant>(S));
+ for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
+ if (IsNonContiguous &&
+ (CombinedInfo.Types[I] & MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
+ ConstSizes.push_back(llvm::ConstantInt::get(
+ CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
+ } else {
+ ConstSizes.push_back(cast<llvm::Constant>(CombinedInfo.Sizes[I]));
+ }
+ }
auto *SizesArrayInit = llvm::ConstantArray::get(
llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
@@ -8727,8 +9209,8 @@ emitOffloadingArrays(CodeGenFunction &CGF,
// The map types are always constant so we don't need to generate code to
// fill arrays. Instead, we create an array constant.
- SmallVector<uint64_t, 4> Mapping(MapTypes.size(), 0);
- llvm::copy(MapTypes, Mapping.begin());
+ SmallVector<uint64_t, 4> Mapping(CombinedInfo.Types.size(), 0);
+ llvm::copy(CombinedInfo.Types, Mapping.begin());
llvm::Constant *MapTypesArrayInit =
llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
std::string MaptypesName =
@@ -8740,8 +9222,57 @@ emitOffloadingArrays(CodeGenFunction &CGF,
MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
Info.MapTypesArray = MapTypesArrayGbl;
+ // The map name information is only built if debug information is
+ // requested.
+ if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
+ Info.MapNamesArray = llvm::Constant::getNullValue(
+ llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
+ } else {
+ auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
+ return emitMappingInformation(CGF, OMPBuilder, MapExpr);
+ };
+ SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
+ llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
+
+ llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
+ llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo(),
+ CombinedInfo.Exprs.size()),
+ InfoMap);
+ auto *MapNamesArrayGbl = new llvm::GlobalVariable(
+ CGM.getModule(), MapNamesArrayInit->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
+ MapNamesArrayInit,
+ CGM.getOpenMPRuntime().getName({"offload_mapnames"}));
+ Info.MapNamesArray = MapNamesArrayGbl;
+ }
+
+ // If there's a present map type modifier, it must not be applied to the end
+ // of a region, so generate a separate map type array in that case.
+ if (Info.separateBeginEndCalls()) {
+ bool EndMapTypesDiffer = false;
+ for (uint64_t &Type : Mapping) {
+ if (Type & MappableExprsHandler::OMP_MAP_PRESENT) {
+ Type &= ~MappableExprsHandler::OMP_MAP_PRESENT;
+ EndMapTypesDiffer = true;
+ }
+ }
+ if (EndMapTypesDiffer) {
+ MapTypesArrayInit =
+ llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
+ MaptypesName = CGM.getOpenMPRuntime().getName({"offload_maptypes"});
+ MapTypesArrayGbl = new llvm::GlobalVariable(
+ CGM.getModule(), MapTypesArrayInit->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
+ MapTypesArrayInit, MaptypesName);
+ MapTypesArrayGbl->setUnnamedAddr(
+ llvm::GlobalValue::UnnamedAddr::Global);
+ Info.MapTypesArrayEnd = MapTypesArrayGbl;
+ }
+ }
+
for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
- llvm::Value *BPVal = *BasePointers[I];
+ llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.BasePointersArray, 0, I);
@@ -8751,10 +9282,11 @@ emitOffloadingArrays(CodeGenFunction &CGF,
CGF.Builder.CreateStore(BPVal, BPAddr);
if (Info.requiresDevicePointerInfo())
- if (const ValueDecl *DevVD = BasePointers[I].getDevicePtrDecl())
+ if (const ValueDecl *DevVD =
+ CombinedInfo.BasePointers[I].getDevicePtrDecl())
Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
- llvm::Value *PVal = Pointers[I];
+ llvm::Value *PVal = CombinedInfo.Pointers[I];
llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.PointersArray, 0, I);
@@ -8770,20 +9302,53 @@ emitOffloadingArrays(CodeGenFunction &CGF,
/*Idx0=*/0,
/*Idx1=*/I);
Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
- CGF.Builder.CreateStore(
- CGF.Builder.CreateIntCast(Sizes[I], CGM.Int64Ty, /*isSigned=*/true),
- SAddr);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
+ CGM.Int64Ty,
+ /*isSigned=*/true),
+ SAddr);
}
+
+ // Fill up the mapper array.
+ llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
+ if (CombinedInfo.Mappers[I]) {
+ MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
+ cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
+ MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
+ Info.HasMapper = true;
+ }
+ Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
+ CGF.Builder.CreateStore(MFunc, MAddr);
}
}
+
+ if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
+ Info.NumberOfPtrs == 0)
+ return;
+
+ emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
}
+namespace {
+/// Additional arguments for emitOffloadingArraysArgument function.
+struct ArgumentsOptions {
+ bool ForEndCall = false;
+ ArgumentsOptions() = default;
+ ArgumentsOptions(bool ForEndCall) : ForEndCall(ForEndCall) {}
+};
+} // namespace
+
/// Emit the arguments to be passed to the runtime library based on the
-/// arrays of pointers, sizes and map types.
+/// arrays of base pointers, pointers, sizes, map types, and mappers. If
+/// ForEndCall, emit map types to be passed for the end of the region instead of
+/// the beginning.
static void emitOffloadingArraysArgument(
CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
- llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
+ llvm::Value *&MapTypesArrayArg, llvm::Value *&MapNamesArrayArg,
+ llvm::Value *&MappersArrayArg, CGOpenMPRuntime::TargetDataInfo &Info,
+ const ArgumentsOptions &Options = ArgumentsOptions()) {
+ assert((!Options.ForEndCall || Info.separateBeginEndCalls()) &&
+ "expected region end call to runtime only when end call is separate");
CodeGenModule &CGM = CGF.CGM;
if (Info.NumberOfPtrs) {
BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
@@ -8800,15 +9365,36 @@ static void emitOffloadingArraysArgument(
/*Idx0=*/0, /*Idx1=*/0);
MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
- Info.MapTypesArray,
+ Options.ForEndCall && Info.MapTypesArrayEnd ? Info.MapTypesArrayEnd
+ : Info.MapTypesArray,
/*Idx0=*/0,
/*Idx1=*/0);
+
+ // Only emit the map names array if debug information is requested.
+ if (CGF.CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
+ MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
+ else
+ MapNamesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.MapNamesArray,
+ /*Idx0=*/0,
+ /*Idx1=*/0);
+ // If there is no user-defined mapper, set the mapper array to nullptr to
+ // avoid unnecessary data privatization.
+ if (!Info.HasMapper)
+ MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
+ else
+ MappersArrayArg =
+ CGF.Builder.CreatePointerCast(Info.MappersArray, CGM.VoidPtrPtrTy);
} else {
BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
MapTypesArrayArg =
llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
+ MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
+ MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
}
}
@@ -8924,7 +9510,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
/// \code
/// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
/// void *base, void *begin,
-/// int64_t size, int64_t type) {
+/// int64_t size, int64_t type,
+/// void *name = nullptr) {
/// // Allocate space for an array section first.
/// if (size > 1 && !maptype.IsDelete)
/// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
@@ -8935,10 +9522,11 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
/// for (auto c : all_components) {
/// if (c.hasMapper())
/// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin, c.arg_size,
-/// c.arg_type);
+/// c.arg_type, c.arg_name);
/// else
/// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
-/// c.arg_begin, c.arg_size, c.arg_type);
+/// c.arg_begin, c.arg_size, c.arg_type,
+/// c.arg_name);
/// }
/// }
/// // Delete the array section.
@@ -8971,12 +9559,15 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
ImplicitParamDecl::Other);
ImplicitParamDecl TypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
ImplicitParamDecl::Other);
+ ImplicitParamDecl NameArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
FunctionArgList Args;
Args.push_back(&HandleArg);
Args.push_back(&BaseArg);
Args.push_back(&BeginArg);
Args.push_back(&SizeArg);
Args.push_back(&TypeArg);
+ Args.push_back(&NameArg);
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
@@ -8995,6 +9586,9 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
C.getPointerType(Int64Ty), Loc);
+ // Convert the size in bytes into the number of array elements.
+ Size = MapperCGF.Builder.CreateExactUDiv(
+ Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
MapperCGF.GetAddrOfLocalVar(&BeginArg).getPointer(),
CGM.getTypes().ConvertTypeForMem(C.getPointerType(PtrTy)));
@@ -9033,6 +9627,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Emit the loop body block.
MapperCGF.EmitBlock(BodyBB);
+ llvm::BasicBlock *LastBB = BodyBB;
llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
PtrPHI->addIncoming(PtrBegin, EntryBB);
@@ -9050,12 +9645,9 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
(void)Scope.Privatize();
// Get map clause information. Fill up the arrays with all mapped variables.
- MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
- MappableExprsHandler::MapValuesArrayTy Pointers;
- MappableExprsHandler::MapValuesArrayTy Sizes;
- MappableExprsHandler::MapFlagsArrayTy MapTypes;
+ MappableExprsHandler::MapCombinedInfoTy Info;
MappableExprsHandler MEHandler(*D, MapperCGF);
- MEHandler.generateAllInfoForMapper(BasePointers, Pointers, Sizes, MapTypes);
+ MEHandler.generateAllInfoForMapper(Info);
// Call the runtime API __tgt_mapper_num_components to get the number of
// pre-existing components.
@@ -9069,17 +9661,21 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));
// Fill up the runtime mapper handle for all components.
- for (unsigned I = 0; I < BasePointers.size(); ++I) {
+ for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
- *BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
+ *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
- Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
- llvm::Value *CurSizeArg = Sizes[I];
+ Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
+ llvm::Value *CurSizeArg = Info.Sizes[I];
+ llvm::Value *CurNameArg =
+ (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
+ ? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
+ : emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
// Extract the MEMBER_OF field from the map type.
llvm::BasicBlock *MemberBB = MapperCGF.createBasicBlock("omp.member");
MapperCGF.EmitBlock(MemberBB);
- llvm::Value *OriMapType = MapperCGF.Builder.getInt64(MapTypes[I]);
+ llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
llvm::Value *Member = MapperCGF.Builder.CreateAnd(
OriMapType,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_MEMBER_OF));
@@ -9155,6 +9751,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_TO));
// In case of tofrom, do nothing.
MapperCGF.EmitBlock(EndBB);
+ LastBB = EndBB;
llvm::PHINode *CurMapType =
MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.maptype");
CurMapType->addIncoming(AllocMapType, AllocBB);
@@ -9162,23 +9759,29 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CurMapType->addIncoming(FromMapType, FromBB);
CurMapType->addIncoming(MemberMapType, ToElseBB);
- // TODO: call the corresponding mapper function if a user-defined mapper is
- // associated with this map clause.
- // Call the runtime API __tgt_push_mapper_component to fill up the runtime
- // data structure.
- llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
- CurSizeArg, CurMapType};
- MapperCGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_push_mapper_component),
- OffloadingArgs);
+ llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
+ CurSizeArg, CurMapType, CurNameArg};
+ if (Info.Mappers[I]) {
+ // Call the corresponding mapper function.
+ llvm::Function *MapperFunc = getOrCreateUserDefinedMapperFunc(
+ cast<OMPDeclareMapperDecl>(Info.Mappers[I]));
+ assert(MapperFunc && "Expect a valid mapper function is available.");
+ MapperCGF.EmitNounwindRuntimeCall(MapperFunc, OffloadingArgs);
+ } else {
+ // Call the runtime API __tgt_push_mapper_component to fill up the runtime
+ // data structure.
+ MapperCGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_push_mapper_component),
+ OffloadingArgs);
+ }
}
// Update the pointer to point to the next element that needs to be mapped,
// and check whether we have mapped all elements.
llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
- PtrPHI->addIncoming(PtrNext, BodyBB);
+ PtrPHI->addIncoming(PtrNext, LastBB);
llvm::Value *IsDone =
MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
llvm::BasicBlock *ExitBB = MapperCGF.createBasicBlock("omp.arraymap.exit");
@@ -9247,15 +9850,27 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
MapType,
MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_FROM)));
+ llvm::Value *MapNameArg = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
+
// Call the runtime API __tgt_push_mapper_component to fill up the runtime
// data structure.
- llvm::Value *OffloadingArgs[] = {Handle, Base, Begin, ArraySize, MapTypeArg};
+ llvm::Value *OffloadingArgs[] = {Handle, Base, Begin,
+ ArraySize, MapTypeArg, MapNameArg};
MapperCGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___tgt_push_mapper_component),
OffloadingArgs);
}
+llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc(
+ const OMPDeclareMapperDecl *D) {
+ auto I = UDMMap.find(D);
+ if (I != UDMMap.end())
+ return I->second;
+ emitUserDefinedMapper(D);
+ return UDMMap.lookup(D);
+}
+
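Note: the helper above memoizes mapper functions in UDMMap: it returns the cached function if one exists, otherwise emits it and re-queries the map. A hypothetical call site (MapperDecl is an assumed variable):

    // Materialize the mapper lazily before emitting a call to it.
    llvm::Function *MapperFn =
        CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(MapperDecl);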
void CGOpenMPRuntime::emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
@@ -9270,10 +9885,11 @@ void CGOpenMPRuntime::emitTargetNumIterationsCall(
if (!TD)
return;
const auto *LD = cast<OMPLoopDirective>(TD);
- auto &&CodeGen = [LD, DeviceID, SizeEmitter, this](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &&CodeGen = [LD, DeviceID, SizeEmitter, &D, this](CodeGenFunction &CGF,
+ PrePostActionTy &) {
if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) {
- llvm::Value *Args[] = {DeviceID, NumIterations};
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
+ llvm::Value *Args[] = {RTLoc, DeviceID, NumIterations};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_push_target_tripcount),
@@ -9295,7 +9911,8 @@ void CGOpenMPRuntime::emitTargetCall(
assert(OutlinedFn && "Invalid outlined function!");
- const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
+ const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
+ D.hasClausesOfKind<OMPNowaitClause>();
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
@@ -9306,9 +9923,11 @@ void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction::OMPTargetDataInfo InputInfo;
llvm::Value *MapTypesArray = nullptr;
+ llvm::Value *MapNamesArray = nullptr;
// Fill up the pointer arrays and transfer execution to the device.
auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
- &MapTypesArray, &CS, RequiresOuterTask, &CapturedVars,
+ &MapTypesArray, &MapNamesArray, &CS, RequiresOuterTask,
+ &CapturedVars,
SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
if (Device.getInt() == OMPC_DEVICE_ancestor) {
// Reverse offloading is not supported, so just execute on the host.
@@ -9355,6 +9974,9 @@ void CGOpenMPRuntime::emitTargetCall(
llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
+ // Source location for the ident struct
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
+
// Emit tripcount for the target loop-based directive.
emitTargetNumIterationsCall(CGF, D, DeviceID, SizeEmitter);
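Note: the tripcount push now takes a source-location ident as its first argument. A paraphrased declaration, inferred from the three values in Args above (an assumption, not the runtime header):

    void __kmpc_push_target_tripcount(ident_t *loc, int64_t device_id,
                                      uint64_t loop_tripcount);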
@@ -9394,32 +10016,39 @@ void CGOpenMPRuntime::emitTargetCall(
// passed to the runtime library - a 32-bit integer with the value zero.
assert(NumThreads && "Thread limit expression should be available along "
"with number of teams.");
- llvm::Value *OffloadingArgs[] = {DeviceID,
+ llvm::Value *OffloadingArgs[] = {RTLoc,
+ DeviceID,
OutlinedFnID,
PointerNum,
InputInfo.BasePointersArray.getPointer(),
InputInfo.PointersArray.getPointer(),
InputInfo.SizesArray.getPointer(),
MapTypesArray,
+ MapNamesArray,
+ InputInfo.MappersArray.getPointer(),
NumTeams,
NumThreads};
Return = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), HasNowait ? OMPRTL___tgt_target_teams_nowait
- : OMPRTL___tgt_target_teams),
+ CGM.getModule(), HasNowait
+ ? OMPRTL___tgt_target_teams_nowait_mapper
+ : OMPRTL___tgt_target_teams_mapper),
OffloadingArgs);
} else {
- llvm::Value *OffloadingArgs[] = {DeviceID,
+ llvm::Value *OffloadingArgs[] = {RTLoc,
+ DeviceID,
OutlinedFnID,
PointerNum,
InputInfo.BasePointersArray.getPointer(),
InputInfo.PointersArray.getPointer(),
InputInfo.SizesArray.getPointer(),
- MapTypesArray};
+ MapTypesArray,
+ MapNamesArray,
+ InputInfo.MappersArray.getPointer()};
Return = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(),
- HasNowait ? OMPRTL___tgt_target_nowait : OMPRTL___tgt_target),
+ CGM.getModule(), HasNowait ? OMPRTL___tgt_target_nowait_mapper
+ : OMPRTL___tgt_target_mapper),
OffloadingArgs);
}
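Note: both branches now call the *_mapper variants, which add three arguments to the old entry points: the ident_t location plus the name and mapper arrays. A sketch of the non-teams form, inferred from OffloadingArgs above (assumed shape; the teams form appends num_teams and thread_limit):

    int32_t __tgt_target_mapper(ident_t *loc, int64_t device_id, void *host_ptr,
                                int32_t arg_num, void **args_base, void **args,
                                int64_t *arg_sizes, int64_t *arg_types,
                                void **arg_names, void **arg_mappers);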
@@ -9454,96 +10083,96 @@ void CGOpenMPRuntime::emitTargetCall(
};
auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
- &CapturedVars, RequiresOuterTask,
+ &MapNamesArray, &CapturedVars, RequiresOuterTask,
&CS](CodeGenFunction &CGF, PrePostActionTy &) {
// Fill up the arrays with all the captured variables.
- MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
- MappableExprsHandler::MapValuesArrayTy Pointers;
- MappableExprsHandler::MapValuesArrayTy Sizes;
- MappableExprsHandler::MapFlagsArrayTy MapTypes;
+ MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
// Get mappable expression information.
MappableExprsHandler MEHandler(D, CGF);
llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
auto RI = CS.getCapturedRecordDecl()->field_begin();
auto CV = CapturedVars.begin();
for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
CE = CS.capture_end();
CI != CE; ++CI, ++RI, ++CV) {
- MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
- MappableExprsHandler::MapValuesArrayTy CurPointers;
- MappableExprsHandler::MapValuesArrayTy CurSizes;
- MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
+ MappableExprsHandler::MapCombinedInfoTy CurInfo;
MappableExprsHandler::StructRangeInfoTy PartialStruct;
// VLA sizes are passed to the outlined region by copy and do not have map
// information associated.
if (CI->capturesVariableArrayType()) {
- CurBasePointers.push_back(*CV);
- CurPointers.push_back(*CV);
- CurSizes.push_back(CGF.Builder.CreateIntCast(
+ CurInfo.Exprs.push_back(nullptr);
+ CurInfo.BasePointers.push_back(*CV);
+ CurInfo.Pointers.push_back(*CV);
+ CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
// Copy to the device as an argument. No need to retrieve it.
- CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
- MappableExprsHandler::OMP_MAP_TARGET_PARAM |
- MappableExprsHandler::OMP_MAP_IMPLICIT);
+ CurInfo.Types.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
+ MappableExprsHandler::OMP_MAP_TARGET_PARAM |
+ MappableExprsHandler::OMP_MAP_IMPLICIT);
+ CurInfo.Mappers.push_back(nullptr);
} else {
// If we have any information in the map clause, we use it, otherwise we
// just do a default mapping.
- MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
- CurSizes, CurMapTypes, PartialStruct);
- if (CurBasePointers.empty())
- MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
- CurPointers, CurSizes, CurMapTypes);
+ MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
+ if (!CI->capturesThis())
+ MappedVarSet.insert(CI->getCapturedVar());
+ else
+ MappedVarSet.insert(nullptr);
+ if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
+ MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
// Generate correct mapping for variables captured by reference in
// lambdas.
if (CI->capturesVariable())
- MEHandler.generateInfoForLambdaCaptures(
- CI->getCapturedVar(), *CV, CurBasePointers, CurPointers, CurSizes,
- CurMapTypes, LambdaPointers);
+ MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
+ CurInfo, LambdaPointers);
}
// We expect to have at least an element of information for this capture.
- assert(!CurBasePointers.empty() &&
+ assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
"Non-existing map pointer for capture!");
- assert(CurBasePointers.size() == CurPointers.size() &&
- CurBasePointers.size() == CurSizes.size() &&
- CurBasePointers.size() == CurMapTypes.size() &&
+ assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Types.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
"Inconsistent map information sizes!");
// If there is an entry in PartialStruct it means we have a struct with
// individual members mapped. Emit an extra combined entry.
if (PartialStruct.Base.isValid())
- MEHandler.emitCombinedEntry(BasePointers, Pointers, Sizes, MapTypes,
- CurMapTypes, PartialStruct);
+ MEHandler.emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct,
+ nullptr, /*NoTargetParam=*/false);
// We need to append the results of this capture to what we already have.
- BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
- Pointers.append(CurPointers.begin(), CurPointers.end());
- Sizes.append(CurSizes.begin(), CurSizes.end());
- MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
+ CombinedInfo.append(CurInfo);
}
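      // Rough shape of the CurInfo/CombinedInfo aggregate appended above
      // (assumed layout; the parallel arrays live in one struct so append()
      // can splice them together in lockstep):
      //   struct MapCombinedInfoTy {
      //     SmallVector<const Expr *> Exprs;
      //     SmallVector<BasePointerInfo> BasePointers;
      //     SmallVector<llvm::Value *> Pointers;
      //     SmallVector<llvm::Value *> Sizes;
      //     SmallVector<OpenMPOffloadMappingFlags> Types;
      //     SmallVector<const ValueDecl *> Mappers;
      //     void append(MapCombinedInfoTy &Other);
      //   };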
// Adjust MEMBER_OF flags for the lambda captures.
- MEHandler.adjustMemberOfForLambdaCaptures(LambdaPointers, BasePointers,
- Pointers, MapTypes);
- // Map other list items in the map clause which are not captured variables
- // but "declare target link" global variables.
- MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
- MapTypes);
+ MEHandler.adjustMemberOfForLambdaCaptures(
+ LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
+ CombinedInfo.Types);
+ // Map any list items in a map clause that were not captured because they
+ // weren't referenced within the construct.
+ MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
TargetDataInfo Info;
// Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
- emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
- Info.PointersArray, Info.SizesArray,
- Info.MapTypesArray, Info);
+ emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
+ emitOffloadingArraysArgument(
+ CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
+ Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
+ {/*ForEndTask=*/false});
+
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
Address(Info.BasePointersArray, CGM.getPointerAlign());
InputInfo.PointersArray =
Address(Info.PointersArray, CGM.getPointerAlign());
InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
+ InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
MapTypesArray = Info.MapTypesArray;
+ MapNamesArray = Info.MapNamesArray;
if (RequiresOuterTask)
CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
else
@@ -9712,8 +10341,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
return;
- scanForTargetRegionsFunctions(
- E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName);
+ scanForTargetRegionsFunctions(E->getRawStmt(), ParentName);
return;
}
@@ -10131,24 +10759,25 @@ void CGOpenMPRuntime::emitTargetDataCalls(
auto &&BeginThenGen = [this, &D, Device, &Info,
&CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
// Fill up the arrays with all the mapped variables.
- MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
- MappableExprsHandler::MapValuesArrayTy Pointers;
- MappableExprsHandler::MapValuesArrayTy Sizes;
- MappableExprsHandler::MapFlagsArrayTy MapTypes;
+ MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
// Get map clause information.
- MappableExprsHandler MCHandler(D, CGF);
- MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
+ MappableExprsHandler MEHandler(D, CGF);
+ MEHandler.generateAllInfo(CombinedInfo);
// Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
+ emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
+ /*IsNonContiguous=*/true);
llvm::Value *BasePointersArrayArg = nullptr;
llvm::Value *PointersArrayArg = nullptr;
llvm::Value *SizesArrayArg = nullptr;
llvm::Value *MapTypesArrayArg = nullptr;
+ llvm::Value *MapNamesArrayArg = nullptr;
+ llvm::Value *MappersArrayArg = nullptr;
emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
- SizesArrayArg, MapTypesArrayArg, Info);
+ SizesArrayArg, MapTypesArrayArg,
+ MapNamesArrayArg, MappersArrayArg, Info);
// Emit device ID if any.
llvm::Value *DeviceID = nullptr;
@@ -10161,13 +10790,23 @@ void CGOpenMPRuntime::emitTargetDataCalls(
// Emit the number of elements in the offloading arrays.
llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
+ // Source location for the ident struct
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
- llvm::Value *OffloadingArgs[] = {
- DeviceID, PointerNum, BasePointersArrayArg,
- PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_target_data_begin),
- OffloadingArgs);
+ llvm::Value *OffloadingArgs[] = {RTLoc,
+ DeviceID,
+ PointerNum,
+ BasePointersArrayArg,
+ PointersArrayArg,
+ SizesArrayArg,
+ MapTypesArrayArg,
+ MapNamesArrayArg,
+ MappersArrayArg};
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
+ OffloadingArgs);
// If device pointer privatization is required, emit the body of the region
// here. It will have to be duplicated: with and without privatization.
@@ -10176,16 +10815,20 @@ void CGOpenMPRuntime::emitTargetDataCalls(
};
// Generate code for the closing of the data region.
- auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
+ PrePostActionTy &) {
assert(Info.isValid() && "Invalid data environment closing arguments.");
llvm::Value *BasePointersArrayArg = nullptr;
llvm::Value *PointersArrayArg = nullptr;
llvm::Value *SizesArrayArg = nullptr;
llvm::Value *MapTypesArrayArg = nullptr;
+ llvm::Value *MapNamesArrayArg = nullptr;
+ llvm::Value *MappersArrayArg = nullptr;
emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
- SizesArrayArg, MapTypesArrayArg, Info);
+ SizesArrayArg, MapTypesArrayArg,
+ MapNamesArrayArg, MappersArrayArg, Info,
+ {/*ForEndCall=*/true});
// Emit device ID if any.
llvm::Value *DeviceID = nullptr;
@@ -10199,12 +10842,22 @@ void CGOpenMPRuntime::emitTargetDataCalls(
// Emit the number of elements in the offloading arrays.
llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
- llvm::Value *OffloadingArgs[] = {
- DeviceID, PointerNum, BasePointersArrayArg,
- PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_target_data_end),
- OffloadingArgs);
+ // Source location for the ident struct
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
+
+ llvm::Value *OffloadingArgs[] = {RTLoc,
+ DeviceID,
+ PointerNum,
+ BasePointersArrayArg,
+ PointersArrayArg,
+ SizesArrayArg,
+ MapTypesArrayArg,
+ MapNamesArrayArg,
+ MappersArrayArg};
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
+ OffloadingArgs);
};
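Note: the begin and end calls are symmetric; both *_mapper entry points take the same nine arguments built above. Assumed declarations (paraphrased, not verbatim):

    void __tgt_target_data_begin_mapper(ident_t *loc, int64_t device_id,
        int32_t arg_num, void **args_base, void **args, int64_t *arg_sizes,
        int64_t *arg_types, void **arg_names, void **arg_mappers);
    void __tgt_target_data_end_mapper(ident_t *loc, int64_t device_id,
        int32_t arg_num, void **args_base, void **args, int64_t *arg_sizes,
        int64_t *arg_types, void **arg_names, void **arg_mappers);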
// If we need device pointer privatization, we need to emit the body of the
@@ -10257,9 +10910,10 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
CodeGenFunction::OMPTargetDataInfo InputInfo;
llvm::Value *MapTypesArray = nullptr;
+ llvm::Value *MapNamesArray = nullptr;
// Generate the code for the opening of the data environment.
- auto &&ThenGen = [this, &D, Device, &InputInfo,
- &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&ThenGen = [this, &D, Device, &InputInfo, &MapTypesArray,
+ &MapNamesArray](CodeGenFunction &CGF, PrePostActionTy &) {
// Emit device ID if any.
llvm::Value *DeviceID = nullptr;
if (Device) {
@@ -10273,29 +10927,35 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
llvm::Constant *PointerNum =
CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
- llvm::Value *OffloadingArgs[] = {DeviceID,
+ // Source location for the ident struct
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
+
+ llvm::Value *OffloadingArgs[] = {RTLoc,
+ DeviceID,
PointerNum,
InputInfo.BasePointersArray.getPointer(),
InputInfo.PointersArray.getPointer(),
InputInfo.SizesArray.getPointer(),
- MapTypesArray};
+ MapTypesArray,
+ MapNamesArray,
+ InputInfo.MappersArray.getPointer()};
- // Select the right runtime function call for each expected standalone
+ // Select the right runtime function call for each standalone
// directive.
const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
RuntimeFunction RTLFn;
switch (D.getDirectiveKind()) {
case OMPD_target_enter_data:
- RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait
- : OMPRTL___tgt_target_data_begin;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
+ : OMPRTL___tgt_target_data_begin_mapper;
break;
case OMPD_target_exit_data:
- RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait
- : OMPRTL___tgt_target_data_end;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
+ : OMPRTL___tgt_target_data_end_mapper;
break;
case OMPD_target_update:
- RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait
- : OMPRTL___tgt_target_data_update;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
+ : OMPRTL___tgt_target_data_update_mapper;
break;
case OMPD_parallel:
case OMPD_for:
@@ -10369,24 +11029,26 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
OffloadingArgs);
};
- auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
- CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
+ &MapNamesArray](CodeGenFunction &CGF,
+ PrePostActionTy &) {
// Fill up the arrays with all the mapped variables.
- MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
- MappableExprsHandler::MapValuesArrayTy Pointers;
- MappableExprsHandler::MapValuesArrayTy Sizes;
- MappableExprsHandler::MapFlagsArrayTy MapTypes;
+ MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
// Get map clause information.
MappableExprsHandler MEHandler(D, CGF);
- MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
+ MEHandler.generateAllInfo(CombinedInfo);
TargetDataInfo Info;
// Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
- emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
- Info.PointersArray, Info.SizesArray,
- Info.MapTypesArray, Info);
+ emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
+ /*IsNonContiguous=*/true);
+ bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
+ D.hasClausesOfKind<OMPNowaitClause>();
+ emitOffloadingArraysArgument(
+ CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
+ Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
+ {/*ForEndTask=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
Address(Info.BasePointersArray, CGM.getPointerAlign());
@@ -10394,8 +11056,10 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
Address(Info.PointersArray, CGM.getPointerAlign());
InputInfo.SizesArray =
Address(Info.SizesArray, CGM.getPointerAlign());
+ InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
MapTypesArray = Info.MapTypesArray;
- if (D.hasClausesOfKind<OMPDependClause>())
+ MapNamesArray = Info.MapNamesArray;
+ if (RequiresOuterTask)
CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
else
emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
@@ -11134,87 +11798,115 @@ Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
return CGF.GetAddrOfLocalVar(NativeParam);
}
-namespace {
-/// Cleanup action for allocate support.
-class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
-public:
- static const int CleanupArgs = 3;
-
-private:
- llvm::FunctionCallee RTLFn;
- llvm::Value *Args[CleanupArgs];
-
-public:
- OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
- ArrayRef<llvm::Value *> CallArgs)
- : RTLFn(RTLFn) {
- assert(CallArgs.size() == CleanupArgs &&
- "Size of arguments does not match.");
- std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
- }
- void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
- if (!CGF.HaveInsertPoint())
- return;
- CGF.EmitRuntimeCall(RTLFn, Args);
- }
-};
-} // namespace
-
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
if (!VD)
return Address::invalid();
+ Address UntiedAddr = Address::invalid();
+ Address UntiedRealAddr = Address::invalid();
+ auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
+ if (It != FunctionToUntiedTaskStackMap.end()) {
+ const UntiedLocalVarsAddressesMap &UntiedData =
+ UntiedLocalVarsStack[It->second];
+ auto I = UntiedData.find(VD);
+ if (I != UntiedData.end()) {
+ UntiedAddr = I->second.first;
+ UntiedRealAddr = I->second.second;
+ }
+ }
const VarDecl *CVD = VD->getCanonicalDecl();
- if (!CVD->hasAttr<OMPAllocateDeclAttr>())
- return Address::invalid();
- const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
- // Use the default allocation.
- if ((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
- AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
- !AA->getAllocator())
- return Address::invalid();
- llvm::Value *Size;
- CharUnits Align = CGM.getContext().getDeclAlign(CVD);
- if (CVD->getType()->isVariablyModifiedType()) {
- Size = CGF.getTypeSize(CVD->getType());
- // Align the size: ((size + align - 1) / align) * align
- Size = CGF.Builder.CreateNUWAdd(
- Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
- Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
- Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
- } else {
- CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
- Size = CGM.getSize(Sz.alignTo(Align));
+ if (CVD->hasAttr<OMPAllocateDeclAttr>()) {
+ // Use the default allocation.
+ if (!isAllocatableDecl(VD))
+ return UntiedAddr;
+ llvm::Value *Size;
+ CharUnits Align = CGM.getContext().getDeclAlign(CVD);
+ if (CVD->getType()->isVariablyModifiedType()) {
+ Size = CGF.getTypeSize(CVD->getType());
+ // Align the size: ((size + align - 1) / align) * align
+ Size = CGF.Builder.CreateNUWAdd(
+ Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
+ Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
+ Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
+ } else {
+ CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
+ Size = CGM.getSize(Sz.alignTo(Align));
+ }
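+ // Worked example of the round-up above: a 10-byte VLA with Align == 8
+ // yields Size = ((10 + 7) / 8) * 8 = 16.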
+ llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
+ const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
+ assert(AA->getAllocator() &&
+ "Expected allocator expression for non-default allocator.");
+ llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
+ // According to the standard, the original allocator type is an enum
+ // (integer). Convert to pointer type, if required.
+ Allocator = CGF.EmitScalarConversion(
+ Allocator, AA->getAllocator()->getType(), CGF.getContext().VoidPtrTy,
+ AA->getAllocator()->getExprLoc());
+ llvm::Value *Args[] = {ThreadID, Size, Allocator};
+
+ llvm::Value *Addr =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc),
+ Args, getName({CVD->getName(), ".void.addr"}));
+ llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free);
+ QualType Ty = CGM.getContext().getPointerType(CVD->getType());
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr, CGF.ConvertTypeForMem(Ty), getName({CVD->getName(), ".addr"}));
+ if (UntiedAddr.isValid())
+ CGF.EmitStoreOfScalar(Addr, UntiedAddr, /*Volatile=*/false, Ty);
+
+ // Cleanup action for allocate support.
+ class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
+ llvm::FunctionCallee RTLFn;
+ unsigned LocEncoding;
+ Address Addr;
+ const Expr *Allocator;
+
+ public:
+ OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn, unsigned LocEncoding,
+ Address Addr, const Expr *Allocator)
+ : RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
+ Allocator(Allocator) {}
+ void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ llvm::Value *Args[3];
+ Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
+ CGF, SourceLocation::getFromRawEncoding(LocEncoding));
+ Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr.getPointer(), CGF.VoidPtrTy);
+ llvm::Value *AllocVal = CGF.EmitScalarExpr(Allocator);
+ // According to the standard, the original allocator type is an enum
+ // (integer). Convert to pointer type, if required.
+ AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
+ CGF.getContext().VoidPtrTy,
+ Allocator->getExprLoc());
+ Args[2] = AllocVal;
+
+ CGF.EmitRuntimeCall(RTLFn, Args);
+ }
+ };
+ Address VDAddr =
+ UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
+ CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
+ NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
+ VDAddr, AA->getAllocator());
+ if (UntiedRealAddr.isValid())
+ if (auto *Region =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
+ Region->emitUntiedSwitch(CGF);
+ return VDAddr;
}
- llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
- assert(AA->getAllocator() &&
- "Expected allocator expression for non-default allocator.");
- llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
- // According to the standard, the original allocator type is a enum (integer).
- // Convert to pointer type, if required.
- if (Allocator->getType()->isIntegerTy())
- Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
- else if (Allocator->getType()->isPointerTy())
- Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
- CGM.VoidPtrTy);
- llvm::Value *Args[] = {ThreadID, Size, Allocator};
-
- llvm::Value *Addr =
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_alloc),
- Args, getName({CVD->getName(), ".void.addr"}));
- llvm::Value *FiniArgs[OMPAllocateCleanupTy::CleanupArgs] = {ThreadID, Addr,
- Allocator};
- llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_free);
+ return UntiedAddr;
+}
- CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
- llvm::makeArrayRef(FiniArgs));
- Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Addr,
- CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
- getName({CVD->getName(), ".addr"}));
- return Address(Addr, Align);
+bool CGOpenMPRuntime::isLocalVarInUntiedTask(CodeGenFunction &CGF,
+ const VarDecl *VD) const {
+ auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
+ if (It == FunctionToUntiedTaskStackMap.end())
+ return false;
+ return UntiedLocalVarsStack[It->second].count(VD) > 0;
}
CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
@@ -11249,6 +11941,24 @@ CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() {
CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back();
}
+CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII(
+ CodeGenFunction &CGF,
+ const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
+ std::pair<Address, Address>> &LocalVars)
+ : CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
+ if (!NeedToPush)
+ return;
+ CGM.getOpenMPRuntime().FunctionToUntiedTaskStackMap.try_emplace(
+ CGF.CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size());
+ CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars);
+}
+
+CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::~UntiedTaskLocalDeclsRAII() {
+ if (!NeedToPush)
+ return;
+ CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back();
+}
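Note: a hypothetical use of the RAII pair defined above, scoping untied-task locals around task-body emission:

    {
      CGOpenMPRuntime::UntiedTaskLocalDeclsRAII UntiedScope(CGF, LocalVars);
      // Emit the task body here; getAddressOfLocalVariable() now resolves
      // declarations through UntiedLocalVarsStack first.
    } // The destructor pops the per-function stack entry.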
+
bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index eb22f155f5ef..b8bb6d85f005 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -105,6 +105,7 @@ struct OMPTaskDataTy final {
SmallVector<const Expr *, 4> ReductionOrigs;
SmallVector<const Expr *, 4> ReductionCopies;
SmallVector<const Expr *, 4> ReductionOps;
+ SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
struct DependData {
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
const Expr *IteratorExpr = nullptr;
@@ -245,6 +246,19 @@ public:
~NontemporalDeclsRAII();
};
+ /// Manages the list of local variables for untied tasks for the specified
+ /// directive.
+ class UntiedTaskLocalDeclsRAII {
+ CodeGenModule &CGM;
+ const bool NeedToPush;
+
+ public:
+ UntiedTaskLocalDeclsRAII(
+ CodeGenFunction &CGF,
+ const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
+ std::pair<Address, Address>> &LocalVars);
+ ~UntiedTaskLocalDeclsRAII();
+ };
+
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
@@ -292,6 +306,9 @@ protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
+ /// An OpenMP-IR-Builder instance.
+ llvm::OpenMPIRBuilder OMPBuilder;
+
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
@@ -372,21 +389,9 @@ protected:
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
- /// An OpenMP-IR-Builder instance.
- llvm::OpenMPIRBuilder OMPBuilder;
- /// Default const ident_t object used for initialization of all other
- /// ident_t objects.
- llvm::Constant *DefaultOpenMPPSource = nullptr;
- using FlagsTy = std::pair<unsigned, unsigned>;
- /// Map of flags and corresponding default locations.
- using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
- OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
- Address getOrCreateDefaultLocation(unsigned Flags);
-
- QualType IdentQTy;
- llvm::StructType *IdentTy = nullptr;
+
/// Map for SourceLocation and OpenMP runtime library debug locations.
- typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
+ typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
@@ -428,6 +433,8 @@ private:
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
+ /// Maps function to the position of the untied task locals stack.
+ llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
@@ -606,7 +613,8 @@ private:
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
- StringRef ParentName, unsigned LineNum) const;
+ StringRef ParentName, unsigned LineNum,
+ bool IgnoreAddressId = false) const;
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
@@ -715,6 +723,11 @@ private:
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
+ using UntiedLocalVarsAddressesMap =
+ llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
+ std::pair<Address, Address>>;
+ llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
+
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
@@ -912,6 +925,10 @@ public:
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
+ /// Get the function for the specified user-defined mapper. If it does not
+ /// exist, create one.
+ llvm::Function *
+ getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
@@ -1610,6 +1627,9 @@ public:
class TargetDataInfo {
/// Set to true if device pointer information have to be obtained.
bool RequiresDevicePointerInfo = false;
+ /// Set to true if Clang emits separate runtime calls for the beginning and
+ /// end of the region. These calls might have separate map type arrays.
+ bool SeparateBeginEndCalls = false;
public:
/// The array of base pointer passed to the runtime library.
@@ -1618,8 +1638,21 @@ public:
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
- /// The array of map types passed to the runtime library.
+ /// The array of map types passed to the runtime library for the beginning
+ /// of the region or for the entire region if there are no separate map
+ /// types for the region end.
llvm::Value *MapTypesArray = nullptr;
+ /// The array of map types passed to the runtime library for the end of the
+ /// region, or nullptr if there are no separate map types for the region
+ /// end.
+ llvm::Value *MapTypesArrayEnd = nullptr;
+ /// The array of user-defined mappers passed to the runtime library.
+ llvm::Value *MappersArray = nullptr;
+ /// The array of original declaration names of mapped pointers sent to the
+ /// runtime library for debugging.
+ llvm::Value *MapNamesArray = nullptr;
+ /// Indicate whether any user-defined mapper exists.
+ bool HasMapper = false;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between the a declaration of a capture and the corresponding base
@@ -1627,22 +1660,29 @@ public:
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
- explicit TargetDataInfo(bool RequiresDevicePointerInfo)
- : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
+ explicit TargetDataInfo(bool RequiresDevicePointerInfo,
+ bool SeparateBeginEndCalls)
+ : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
+ SeparateBeginEndCalls(SeparateBeginEndCalls) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
BasePointersArray = nullptr;
PointersArray = nullptr;
SizesArray = nullptr;
MapTypesArray = nullptr;
+ MapTypesArrayEnd = nullptr;
+ MapNamesArray = nullptr;
+ MappersArray = nullptr;
+ HasMapper = false;
NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
return BasePointersArray && PointersArray && SizesArray &&
- MapTypesArray && NumberOfPtrs;
+ MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
+ bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
};
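Note: when SeparateBeginEndCalls is set, the region-end call may need its own map-type array (for example, if a map type changes at region exit). A hypothetical selection helper over the fields documented above:

    llvm::Value *chooseMapTypes(const CGOpenMPRuntime::TargetDataInfo &Info,
                                bool ForEndCall) {
      return (ForEndCall && Info.MapTypesArrayEnd) ? Info.MapTypesArrayEnd
                                                   : Info.MapTypesArray;
    }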
/// Emit the target data mapping code associated with \a D.
@@ -1851,6 +1891,9 @@ public:
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
+
+ /// Returns true if the variable is a local variable in untied task.
+ bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};
/// Class supports emission of SIMD-only code.
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp
new file mode 100644
index 000000000000..33d4ab838af1
--- /dev/null
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp
@@ -0,0 +1,60 @@
+//===-- CGOpenMPRuntimeAMDGCN.cpp - Interface to OpenMP AMDGCN Runtimes --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for OpenMP runtime code generation specialized to
+// AMDGCN targets from generalized CGOpenMPRuntimeGPU class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGOpenMPRuntimeAMDGCN.h"
+#include "CGOpenMPRuntimeGPU.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Cuda.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm::omp;
+
+CGOpenMPRuntimeAMDGCN::CGOpenMPRuntimeAMDGCN(CodeGenModule &CGM)
+ : CGOpenMPRuntimeGPU(CGM) {
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ llvm_unreachable("OpenMP AMDGCN can only handle device code.");
+}
+
+llvm::Value *CGOpenMPRuntimeAMDGCN::getGPUWarpSize(CodeGenFunction &CGF) {
+ CGBuilderTy &Bld = CGF.Builder;
+ // Return the constant, compile-time, target-specific warp size.
+ unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
+ return Bld.getInt32(WarpSize);
+}
+
+llvm::Value *CGOpenMPRuntimeAMDGCN::getGPUThreadID(CodeGenFunction &CGF) {
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::Function *F =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::amdgcn_workitem_id_x);
+ return Bld.CreateCall(F, llvm::None, "nvptx_tid");
+}
+
+llvm::Value *CGOpenMPRuntimeAMDGCN::getGPUNumThreads(CodeGenFunction &CGF) {
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::Module *M = &CGF.CGM.getModule();
+ const char *LocSize = "__kmpc_amdgcn_gpu_num_threads";
+ llvm::Function *F = M->getFunction(LocSize);
+ if (!F) {
+ F = llvm::Function::Create(
+ llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false),
+ llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule());
+ }
+ return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
+}
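Note: unlike the thread-id query, the thread count is routed through a device-RTL helper rather than an intrinsic, and the IR value names still say "nvptx_*", inherited from the NVPTX code this was generalized from. A sketch of what the device RTL might provide (hypothetical implementation, assuming the __builtin_amdgcn_workgroup_size_x builtin):

    extern "C" int32_t __kmpc_amdgcn_gpu_num_threads() {
      return __builtin_amdgcn_workgroup_size_x();
    }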
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h b/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h
new file mode 100644
index 000000000000..c1421261bfc1
--- /dev/null
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h
@@ -0,0 +1,43 @@
+//===--- CGOpenMPRuntimeAMDGCN.h - Interface to OpenMP AMDGCN Runtimes ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for OpenMP runtime code generation specialized to
+// AMDGCN targets from generalized CGOpenMPRuntimeGPU class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEAMDGCN_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEAMDGCN_H
+
+#include "CGOpenMPRuntime.h"
+#include "CGOpenMPRuntimeGPU.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtOpenMP.h"
+
+namespace clang {
+namespace CodeGen {
+
+class CGOpenMPRuntimeAMDGCN final : public CGOpenMPRuntimeGPU {
+
+public:
+ explicit CGOpenMPRuntimeAMDGCN(CodeGenModule &CGM);
+
+ /// Get the GPU warp size.
+ llvm::Value *getGPUWarpSize(CodeGenFunction &CGF) override;
+
+ /// Get the id of the current thread on the GPU.
+ llvm::Value *getGPUThreadID(CodeGenFunction &CGF) override;
+
+ /// Get the maximum number of threads in a block of the GPU.
+ llvm::Value *getGPUNumThreads(CodeGenFunction &CGF) override;
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEAMDGCN_H
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
new file mode 100644
index 000000000000..9c8c7b83d1d2
--- /dev/null
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -0,0 +1,4864 @@
+//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a generalized class for OpenMP runtime code generation
+// specialized by GPU targets NVPTX and AMDGCN.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGOpenMPRuntimeGPU.h"
+#include "CGOpenMPRuntimeNVPTX.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Cuda.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Frontend/OpenMP/OMPGridValues.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm::omp;
+
+namespace {
+/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
+class NVPTXActionTy final : public PrePostActionTy {
+ llvm::FunctionCallee EnterCallee = nullptr;
+ ArrayRef<llvm::Value *> EnterArgs;
+ llvm::FunctionCallee ExitCallee = nullptr;
+ ArrayRef<llvm::Value *> ExitArgs;
+ bool Conditional = false;
+ llvm::BasicBlock *ContBlock = nullptr;
+
+public:
+ NVPTXActionTy(llvm::FunctionCallee EnterCallee,
+ ArrayRef<llvm::Value *> EnterArgs,
+ llvm::FunctionCallee ExitCallee,
+ ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
+ : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
+ ExitArgs(ExitArgs), Conditional(Conditional) {}
+ void Enter(CodeGenFunction &CGF) override {
+ llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
+ if (Conditional) {
+ llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
+ auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
+ ContBlock = CGF.createBasicBlock("omp_if.end");
+ // Generate the branch (If-stmt)
+ CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
+ CGF.EmitBlock(ThenBlock);
+ }
+ }
+ void Done(CodeGenFunction &CGF) {
+ // Emit the rest of blocks/branches
+ CGF.EmitBranch(ContBlock);
+ CGF.EmitBlock(ContBlock, true);
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
+ }
+};
+
+/// A class to track the execution mode when codegening directives within
+/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
+/// to the target region and used by containing directives such as 'parallel'
+/// to emit optimized code.
+class ExecutionRuntimeModesRAII {
+private:
+ CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
+ CGOpenMPRuntimeGPU::EM_Unknown;
+ CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
+ bool SavedRuntimeMode = false;
+ bool *RuntimeMode = nullptr;
+
+public:
+ /// Constructor for Non-SPMD mode.
+ ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
+ : ExecMode(ExecMode) {
+ SavedExecMode = ExecMode;
+ ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
+ }
+ /// Constructor for SPMD mode.
+ ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
+ bool &RuntimeMode, bool FullRuntimeMode)
+ : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
+ SavedExecMode = ExecMode;
+ SavedRuntimeMode = RuntimeMode;
+ ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
+ RuntimeMode = FullRuntimeMode;
+ }
+ ~ExecutionRuntimeModesRAII() {
+ ExecMode = SavedExecMode;
+ if (RuntimeMode)
+ *RuntimeMode = SavedRuntimeMode;
+ }
+};
+
+/// GPU Configuration: This information can be derived from CUDA registers,
+/// however, providing compile time constants helps generate more efficient
+/// code. For all practical purposes this is fine because the configuration
+/// is the same for all known NVPTX architectures.
+enum MachineConfiguration : unsigned {
+ /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
+ /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
+ /// and GV_Warp_Size_Log2_Mask.
+
+ /// Global memory alignment for performance.
+ GlobalMemoryAlignment = 128,
+
+ /// Maximal size of the shared memory buffer.
+ SharedMemorySize = 128,
+};
+
+static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
+ RefExpr = RefExpr->IgnoreParens();
+ if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
+ const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
+ const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ }
+ RefExpr = RefExpr->IgnoreParenImpCasts();
+ if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
+ return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
+ const auto *ME = cast<MemberExpr>(RefExpr);
+ return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
+}
+
+static RecordDecl *buildRecordForGlobalizedVars(
+ ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
+ ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &MappedDeclsFields, int BufSize) {
+ using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
+ if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
+ return nullptr;
+ SmallVector<VarsDataTy, 4> GlobalizedVars;
+ for (const ValueDecl *D : EscapedDecls)
+ GlobalizedVars.emplace_back(
+ CharUnits::fromQuantity(std::max(
+ C.getDeclAlign(D).getQuantity(),
+ static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
+ D);
+ for (const ValueDecl *D : EscapedDeclsForTeams)
+ GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
+ llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
+ return L.first > R.first;
+ });
+
+ // Build struct _globalized_locals_ty {
+ // /* globalized vars */[WarpSize] align (max(decl_align,
+ // GlobalMemoryAlignment))
+ // /* globalized vars */ for EscapedDeclsForTeams
+ // };
+ RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
+ GlobalizedRD->startDefinition();
+ llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
+ EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
+ for (const auto &Pair : GlobalizedVars) {
+ const ValueDecl *VD = Pair.second;
+ QualType Type = VD->getType();
+ if (Type->isLValueReferenceType())
+ Type = C.getPointerType(Type.getNonReferenceType());
+ else
+ Type = Type.getNonReferenceType();
+ SourceLocation Loc = VD->getLocation();
+ FieldDecl *Field;
+ if (SingleEscaped.count(VD)) {
+ Field = FieldDecl::Create(
+ C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
+ C.getTrivialTypeSourceInfo(Type, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
+ E(VD->getAttrs().end());
+ I != E; ++I)
+ Field->addAttr(*I);
+ }
+ } else {
+ llvm::APInt ArraySize(32, BufSize);
+ Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
+ 0);
+ Field = FieldDecl::Create(
+ C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
+ C.getTrivialTypeSourceInfo(Type, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
+ static_cast<CharUnits::QuantityType>(
+ GlobalMemoryAlignment)));
+ Field->addAttr(AlignedAttr::CreateImplicit(
+ C, /*IsAlignmentExpr=*/true,
+ IntegerLiteral::Create(C, Align,
+ C.getIntTypeForBitwidth(32, /*Signed=*/0),
+ SourceLocation()),
+ {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
+ }
+ GlobalizedRD->addDecl(Field);
+ MappedDeclsFields.try_emplace(VD, Field);
+ }
+ GlobalizedRD->completeDefinition();
+ return GlobalizedRD;
+}
+
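+// Illustrative output of buildRecordForGlobalizedVars (hypothetical input: an
+// escaped 'int x' from a parallel region plus an escaped 'double d' from a
+// teams region, with BufSize == 32):
+//   struct _globalized_locals_ty {
+//     int x[32] __attribute__((aligned(128))); // per-lane slots, padded to
+//                                              // GlobalMemoryAlignment
+//     double d;                                // single slot for teams
+//   };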
+/// Get the list of variables that can escape their declaration context.
+class CheckVarsEscapingDeclContext final
+ : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
+ CodeGenFunction &CGF;
+ llvm::SetVector<const ValueDecl *> EscapedDecls;
+ llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
+ llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
+ RecordDecl *GlobalizedRD = nullptr;
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
+ bool AllEscaped = false;
+ bool IsForCombinedParallelRegion = false;
+
+ void markAsEscaped(const ValueDecl *VD) {
+ // Do not globalize declare target variables.
+ if (!isa<VarDecl>(VD) ||
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
+ return;
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ // Use user-specified allocation.
+ if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
+ return;
+ // Variables captured by value must be globalized.
+ if (auto *CSI = CGF.CapturedStmtInfo) {
+ if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
+ // Check if we need to capture the variable that was already captured by
+ // value in the outer region.
+ if (!IsForCombinedParallelRegion) {
+ if (!FD->hasAttrs())
+ return;
+ const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
+ if (!Attr)
+ return;
+ if (((Attr->getCaptureKind() != OMPC_map) &&
+ !isOpenMPPrivate(Attr->getCaptureKind())) ||
+ ((Attr->getCaptureKind() == OMPC_map) &&
+ !FD->getType()->isAnyPointerType()))
+ return;
+ }
+ if (!FD->getType()->isReferenceType()) {
+ assert(!VD->getType()->isVariablyModifiedType() &&
+ "Parameter captured by value with variably modified type");
+ EscapedParameters.insert(VD);
+ } else if (!IsForCombinedParallelRegion) {
+ return;
+ }
+ }
+ }
+ if ((!CGF.CapturedStmtInfo ||
+ (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
+ VD->getType()->isReferenceType())
+ // Do not globalize variables with reference type.
+ return;
+ if (VD->getType()->isVariablyModifiedType())
+ EscapedVariableLengthDecls.insert(VD);
+ else
+ EscapedDecls.insert(VD);
+ }
+
+ void VisitValueDecl(const ValueDecl *VD) {
+ if (VD->getType()->isLValueReferenceType())
+ markAsEscaped(VD);
+ if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
+ if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = VD->getType()->isLValueReferenceType();
+ Visit(VarD->getInit());
+ AllEscaped = SavedAllEscaped;
+ }
+ }
+ }
+ void VisitOpenMPCapturedStmt(const CapturedStmt *S,
+ ArrayRef<OMPClause *> Clauses,
+ bool IsCombinedParallelRegion) {
+ if (!S)
+ return;
+ for (const CapturedStmt::Capture &C : S->captures()) {
+ if (C.capturesVariable() && !C.capturesVariableByCopy()) {
+ const ValueDecl *VD = C.getCapturedVar();
+ bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
+ if (IsCombinedParallelRegion) {
+ // Check if the variable is privatized in the combined construct and
+ // those private copies must be shared in the inner parallel
+ // directive.
+ IsForCombinedParallelRegion = false;
+ for (const OMPClause *C : Clauses) {
+ if (!isOpenMPPrivate(C->getClauseKind()) ||
+ C->getClauseKind() == OMPC_reduction ||
+ C->getClauseKind() == OMPC_linear ||
+ C->getClauseKind() == OMPC_private)
+ continue;
+ ArrayRef<const Expr *> Vars;
+ if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
+ Vars = PC->getVarRefs();
+ else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
+ Vars = PC->getVarRefs();
+ else
+ llvm_unreachable("Unexpected clause.");
+ for (const auto *E : Vars) {
+ const Decl *D =
+ cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
+ if (D == VD->getCanonicalDecl()) {
+ IsForCombinedParallelRegion = true;
+ break;
+ }
+ }
+ if (IsForCombinedParallelRegion)
+ break;
+ }
+ }
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
+ }
+ }
+ }
+
+ void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
+ assert(!GlobalizedRD &&
+ "Record for globalized variables is built already.");
+ ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
+ unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
+ if (IsInTTDRegion)
+ EscapedDeclsForTeams = EscapedDecls.getArrayRef();
+ else
+ EscapedDeclsForParallel = EscapedDecls.getArrayRef();
+ GlobalizedRD = ::buildRecordForGlobalizedVars(
+ CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
+ MappedDeclsFields, WarpSize);
+ }
+
+public:
+ CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
+ ArrayRef<const ValueDecl *> TeamsReductions)
+ : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
+ }
+ virtual ~CheckVarsEscapingDeclContext() = default;
+ void VisitDeclStmt(const DeclStmt *S) {
+ if (!S)
+ return;
+ for (const Decl *D : S->decls())
+ if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
+ VisitValueDecl(VD);
+ }
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
+ if (!D)
+ return;
+ if (!D->hasAssociatedStmt())
+ return;
+ if (const auto *S =
+ dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
+ // Do not analyze directives that do not actually require capturing,
+ // like `omp for` or `omp simd` directives.
+ llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
+ if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
+ VisitStmt(S->getCapturedStmt());
+ return;
+ }
+ VisitOpenMPCapturedStmt(
+ S, D->clauses(),
+ CaptureRegions.back() == OMPD_parallel &&
+ isOpenMPDistributeDirective(D->getDirectiveKind()));
+ }
+ }
+ void VisitCapturedStmt(const CapturedStmt *S) {
+ if (!S)
+ return;
+ for (const CapturedStmt::Capture &C : S->captures()) {
+ if (C.capturesVariable() && !C.capturesVariableByCopy()) {
+ const ValueDecl *VD = C.getCapturedVar();
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ }
+ }
+ }
+ void VisitLambdaExpr(const LambdaExpr *E) {
+ if (!E)
+ return;
+ for (const LambdaCapture &C : E->captures()) {
+ if (C.capturesVariable()) {
+ if (C.getCaptureKind() == LCK_ByRef) {
+ const ValueDecl *VD = C.getCapturedVar();
+ markAsEscaped(VD);
+ if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ }
+ }
+ }
+ }
+ void VisitBlockExpr(const BlockExpr *E) {
+ if (!E)
+ return;
+ for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
+ if (C.isByRef()) {
+ const VarDecl *VD = C.getVariable();
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
+ VisitValueDecl(VD);
+ }
+ }
+ }
+ void VisitCallExpr(const CallExpr *E) {
+ if (!E)
+ return;
+ for (const Expr *Arg : E->arguments()) {
+ if (!Arg)
+ continue;
+ if (Arg->isLValue()) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = true;
+ Visit(Arg);
+ AllEscaped = SavedAllEscaped;
+ } else {
+ Visit(Arg);
+ }
+ }
+ Visit(E->getCallee());
+ }
+ void VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (!E)
+ return;
+ const ValueDecl *VD = E->getDecl();
+ if (AllEscaped)
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ else if (const auto *VarD = dyn_cast<VarDecl>(VD))
+ if (VarD->isInitCapture())
+ VisitValueDecl(VD);
+ }
+ void VisitUnaryOperator(const UnaryOperator *E) {
+ if (!E)
+ return;
+ if (E->getOpcode() == UO_AddrOf) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = true;
+ Visit(E->getSubExpr());
+ AllEscaped = SavedAllEscaped;
+ } else {
+ Visit(E->getSubExpr());
+ }
+ }
+ void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+ if (!E)
+ return;
+ if (E->getCastKind() == CK_ArrayToPointerDecay) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = true;
+ Visit(E->getSubExpr());
+ AllEscaped = SavedAllEscaped;
+ } else {
+ Visit(E->getSubExpr());
+ }
+ }
+ void VisitExpr(const Expr *E) {
+ if (!E)
+ return;
+ bool SavedAllEscaped = AllEscaped;
+ if (!E->isLValue())
+ AllEscaped = false;
+ for (const Stmt *Child : E->children())
+ if (Child)
+ Visit(Child);
+ AllEscaped = SavedAllEscaped;
+ }
+ void VisitStmt(const Stmt *S) {
+ if (!S)
+ return;
+ for (const Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ /// Returns the record that handles all the escaped local variables and is
+ /// used instead of their original storage.
+ const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
+ if (!GlobalizedRD)
+ buildRecordForGlobalizedVars(IsInTTDRegion);
+ return GlobalizedRD;
+ }
+
+ /// Returns the field in the globalized record for the escaped variable.
+ const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
+ assert(GlobalizedRD &&
+ "Record for globalized variables must be generated already.");
+ auto I = MappedDeclsFields.find(VD);
+ if (I == MappedDeclsFields.end())
+ return nullptr;
+ return I->getSecond();
+ }
+
+ /// Returns the list of the escaped local variables/parameters.
+ ArrayRef<const ValueDecl *> getEscapedDecls() const {
+ return EscapedDecls.getArrayRef();
+ }
+
+ /// Returns the set of escaped local variables that are actually parameters
+ /// passed by value.
+ const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
+ return EscapedParameters;
+ }
+
+ /// Returns the list of the escaped variables with variably modified types.
+ ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
+ return EscapedVariableLengthDecls.getArrayRef();
+ }
+};
+} // anonymous namespace
+
+/// Get the id of the warp in the block.
+/// To generate more efficient code, we assume the warp size is 32, which is
+/// always the case on NVPTX devices.
+static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
+ CGBuilderTy &Bld = CGF.Builder;
+ unsigned LaneIDBits =
+ CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
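+ // warp_id = thread_id >> log2(warp size), i.e. the thread ID divided by the
+ // warp size.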
+ return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
+}
+
+/// Get the id of the current lane in the warp.
+/// To generate more efficient code, we assume the warp size is 32, which is
+/// always the case on NVPTX devices.
+static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
+ CGBuilderTy &Bld = CGF.Builder;
+ unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
+ llvm::omp::GV_Warp_Size_Log2_Mask);
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
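+ // lane_id = thread_id & (warp size - 1), i.e. the thread ID modulo the warp
+ // size.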
+ return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
+ "nvptx_lane_id");
+}
+
+/// Get the value of the thread_limit clause in the teams directive.
+/// For the 'generic' execution mode, the runtime encodes thread_limit in
+/// the launch parameters, always starting thread_limit+warpSize threads per
+/// CTA. The threads in the last warp are reserved for master execution.
+/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
+static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
+ bool IsInSPMDExecutionMode = false) {
+ CGBuilderTy &Bld = CGF.Builder;
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ return IsInSPMDExecutionMode
+ ? RT.getGPUNumThreads(CGF)
+ : Bld.CreateNUWSub(RT.getGPUNumThreads(CGF),
+ RT.getGPUWarpSize(CGF), "thread_limit");
+}
+
+/// Get the thread id of the OMP master thread.
+/// The master thread id is the first thread (lane) of the last warp in the
+/// GPU block. Warp size is assumed to be some power of 2.
+/// Thread id is 0 indexed.
+/// E.g: If NumThreads is 33, master id is 32.
+/// If NumThreads is 64, master id is 32.
+/// If NumThreads is 1024, master id is 992.
+static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
+ CGBuilderTy &Bld = CGF.Builder;
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ llvm::Value *NumThreads = RT.getGPUNumThreads(CGF);
+ // We assume that the warp size is a power of 2.
+ llvm::Value *Mask = Bld.CreateNUWSub(RT.getGPUWarpSize(CGF), Bld.getInt32(1));
+
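+ // Clear the low-order warp bits of (NumThreads - 1) to round down to the
+ // first lane of the last warp.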
+ return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
+ Bld.CreateNot(Mask), "master_tid");
+}
+
+CGOpenMPRuntimeGPU::WorkerFunctionState::WorkerFunctionState(
+ CodeGenModule &CGM, SourceLocation Loc)
+ : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
+ Loc(Loc) {
+ createWorkerFunction(CGM);
+}
+
+void CGOpenMPRuntimeGPU::WorkerFunctionState::createWorkerFunction(
+ CodeGenModule &CGM) {
+ // Create a worker function with no arguments.
+
+ WorkerFn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ /*placeholder=*/"_worker", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
+ WorkerFn->setDoesNotRecurse();
+}
+
+CGOpenMPRuntimeGPU::ExecutionMode
+CGOpenMPRuntimeGPU::getExecutionMode() const {
+ return CurrentExecutionMode;
+}
+
+static CGOpenMPRuntimeGPU::DataSharingMode
+getDataSharingMode(CodeGenModule &CGM) {
+ return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
+ : CGOpenMPRuntimeGPU::Generic;
+}
+
+/// Check for an inner (nested) SPMD construct, if any.
+static bool hasNestedSPMDDirective(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ const auto *CS = D.getInnermostCapturedStmt();
+ const auto *Body =
+ CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
+ const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+
+ if (const auto *NestedDir =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
+ switch (D.getDirectiveKind()) {
+ case OMPD_target:
+ if (isOpenMPParallelDirective(DKind))
+ return true;
+ if (DKind == OMPD_teams) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return false;
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPParallelDirective(DKind))
+ return true;
+ }
+ }
+ return false;
+ case OMPD_target_teams:
+ return isOpenMPParallelDirective(DKind);
+ case OMPD_target_simd:
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_master:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_master_taskloop:
+ case OMPD_master_taskloop_simd:
+ case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_master_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ default:
+ llvm_unreachable("Unexpected directive.");
+ }
+ }
+
+ return false;
+}
+
+static bool supportsSPMDExecutionMode(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
+ switch (DirectiveKind) {
+ case OMPD_target:
+ case OMPD_target_teams:
+ return hasNestedSPMDDirective(Ctx, D);
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_simd:
+ case OMPD_target_teams_distribute_simd:
+ return true;
+ case OMPD_target_teams_distribute:
+ return false;
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_master:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_master_taskloop:
+ case OMPD_master_taskloop_simd:
+ case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_master_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ default:
+ break;
+ }
+ llvm_unreachable(
+ "Unknown programming model for OpenMP directive on NVPTX target.");
+}
+
+/// Check if the directive is loop-based, has no ordered clause, and either has
+/// no schedule clause at all or has static scheduling.
+static bool hasStaticScheduling(const OMPExecutableDirective &D) {
+ assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
+ isOpenMPLoopDirective(D.getDirectiveKind()) &&
+ "Expected loop-based directive.");
+ return !D.hasClausesOfKind<OMPOrderedClause>() &&
+ (!D.hasClausesOfKind<OMPScheduleClause>() ||
+ llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
+ [](const OMPScheduleClause *C) {
+ return C->getScheduleKind() == OMPC_SCHEDULE_static;
+ }));
+}
+
+/// Check for an inner (nested) lightweight runtime construct, if any.
+static bool hasNestedLightweightDirective(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
+ const auto *CS = D.getInnermostCapturedStmt();
+ const auto *Body =
+ CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
+ const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+
+ if (const auto *NestedDir =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
+ switch (D.getDirectiveKind()) {
+ case OMPD_target:
+ if (isOpenMPParallelDirective(DKind) &&
+ isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
+ hasStaticScheduling(*NestedDir))
+ return true;
+ if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
+ return true;
+ if (DKind == OMPD_parallel) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return false;
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
+ }
+ } else if (DKind == OMPD_teams) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return false;
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPParallelDirective(DKind) &&
+ isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
+ if (DKind == OMPD_parallel) {
+ Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return false;
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ case OMPD_target_teams:
+ if (isOpenMPParallelDirective(DKind) &&
+ isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
+ hasStaticScheduling(*NestedDir))
+ return true;
+ if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
+ return true;
+ if (DKind == OMPD_parallel) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return false;
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
+ }
+ }
+ return false;
+ case OMPD_target_parallel:
+ if (DKind == OMPD_simd)
+ return true;
+ return isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
+ case OMPD_target_teams_distribute:
+ case OMPD_target_simd:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_master:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_master_taskloop:
+ case OMPD_master_taskloop_simd:
+ case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_master_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ default:
+ llvm_unreachable("Unexpected directive.");
+ }
+ }
+
+ return false;
+}
+
+/// Checks if the construct supports the lightweight runtime. It must be an
+/// SPMD construct with an inner loop-based construct that uses static
+/// scheduling.
+static bool supportsLightweightRuntime(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ if (!supportsSPMDExecutionMode(Ctx, D))
+ return false;
+ OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
+ switch (DirectiveKind) {
+ case OMPD_target:
+ case OMPD_target_teams:
+ case OMPD_target_parallel:
+ return hasNestedLightweightDirective(Ctx, D);
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ // (Last|First)-privates must be shared in the parallel region.
+ return hasStaticScheduling(D);
+ case OMPD_target_simd:
+ case OMPD_target_teams_distribute_simd:
+ return true;
+ case OMPD_target_teams_distribute:
+ return false;
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_master:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_master_taskloop:
+ case OMPD_master_taskloop_simd:
+ case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_master_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ default:
+ break;
+ }
+ llvm_unreachable(
+ "Unknown programming model for OpenMP directive on NVPTX target.");
+}
+
+void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
+ StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry,
+ const RegionCodeGenTy &CodeGen) {
+ ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
+ EntryFunctionState EST;
+ WorkerFunctionState WST(CGM, D.getBeginLoc());
+ Work.clear();
+ WrapperFunctionsMap.clear();
+
+ // Emit target region as a standalone region.
+ class NVPTXPrePostActionTy : public PrePostActionTy {
+ CGOpenMPRuntimeGPU::EntryFunctionState &EST;
+ CGOpenMPRuntimeGPU::WorkerFunctionState &WST;
+
+ public:
+ NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
+ CGOpenMPRuntimeGPU::WorkerFunctionState &WST)
+ : EST(EST), WST(WST) {}
+ void Enter(CodeGenFunction &CGF) override {
+ auto &RT =
+ static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ RT.emitNonSPMDEntryHeader(CGF, EST, WST);
+ // Skip target region initialization.
+ RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ auto &RT =
+ static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ RT.clearLocThreadIdInsertPt(CGF);
+ RT.emitNonSPMDEntryFooter(CGF, EST);
+ }
+ } Action(EST, WST);
+ CodeGen.setAction(Action);
+ IsInTTDRegion = true;
+ // Reserve a place for the globalized memory.
+ GlobalizedRecords.emplace_back();
+ if (!KernelStaticGlobalized) {
+ KernelStaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::UndefValue::get(CGM.VoidPtrTy),
+ "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
+ }
+ emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
+ IsOffloadEntry, CodeGen);
+ IsInTTDRegion = false;
+
+ // Now change the name of the worker function to correspond to this target
+ // region's entry function.
+ WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
+
+ // Create the worker function
+ emitWorkerFunction(WST);
+}
+
+// Set up NVPTX threads for the master-worker OpenMP scheme.
+void CGOpenMPRuntimeGPU::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
+ EntryFunctionState &EST,
+ WorkerFunctionState &WST) {
+ CGBuilderTy &Bld = CGF.Builder;
+
+ llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
+ llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
+ llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
+ EST.ExitBB = CGF.createBasicBlock(".exit");
+
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
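+ // Threads below the thread limit become workers; the reserved last warp
+ // holds the master.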
+ llvm::Value *IsWorker =
+ Bld.CreateICmpULT(RT.getGPUThreadID(CGF), getThreadLimit(CGF));
+ Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
+
+ CGF.EmitBlock(WorkerBB);
+ emitCall(CGF, WST.Loc, WST.WorkerFn);
+ CGF.EmitBranch(EST.ExitBB);
+
+ CGF.EmitBlock(MasterCheckBB);
+ llvm::Value *IsMaster =
+ Bld.CreateICmpEQ(RT.getGPUThreadID(CGF), getMasterThreadID(CGF));
+ Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
+
+ CGF.EmitBlock(MasterBB);
+ IsInTargetMasterThreadRegion = true;
+ // SEQUENTIAL (MASTER) REGION START
+ // First action in sequential region:
+ // Initialize the state of the OpenMP runtime library on the GPU.
+ // TODO: Optimize runtime initialization and pass in correct value.
+ llvm::Value *Args[] = {getThreadLimit(CGF),
+ Bld.getInt16(/*RequiresOMPRuntime=*/1)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_kernel_init),
+ Args);
+
+ // For data sharing, we need to initialize the stack.
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack));
+
+ emitGenericVarsProlog(CGF, WST.Loc);
+}
+
+void CGOpenMPRuntimeGPU::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
+ EntryFunctionState &EST) {
+ IsInTargetMasterThreadRegion = false;
+ if (!CGF.HaveInsertPoint())
+ return;
+
+ emitGenericVarsEpilog(CGF);
+
+ if (!EST.ExitBB)
+ EST.ExitBB = CGF.createBasicBlock(".exit");
+
+ llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
+ CGF.EmitBranch(TerminateBB);
+
+ CGF.EmitBlock(TerminateBB);
+ // Signal termination condition.
+ // TODO: Optimize runtime initialization and pass in correct value.
+ llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_kernel_deinit),
+ Args);
+ // Barrier to terminate worker threads.
+ syncCTAThreads(CGF);
+ // Master thread jumps to exit point.
+ CGF.EmitBranch(EST.ExitBB);
+
+ CGF.EmitBlock(EST.ExitBB);
+ EST.ExitBB = nullptr;
+}
+
+void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
+ StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry,
+ const RegionCodeGenTy &CodeGen) {
+ ExecutionRuntimeModesRAII ModeRAII(
+ CurrentExecutionMode, RequiresFullRuntime,
+ CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
+ !supportsLightweightRuntime(CGM.getContext(), D));
+ EntryFunctionState EST;
+
+ // Emit target region as a standalone region.
+ class NVPTXPrePostActionTy : public PrePostActionTy {
+ CGOpenMPRuntimeGPU &RT;
+ CGOpenMPRuntimeGPU::EntryFunctionState &EST;
+ const OMPExecutableDirective &D;
+
+ public:
+ NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
+ CGOpenMPRuntimeGPU::EntryFunctionState &EST,
+ const OMPExecutableDirective &D)
+ : RT(RT), EST(EST), D(D) {}
+ void Enter(CodeGenFunction &CGF) override {
+ RT.emitSPMDEntryHeader(CGF, EST, D);
+ // Skip target region initialization.
+ RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ RT.clearLocThreadIdInsertPt(CGF);
+ RT.emitSPMDEntryFooter(CGF, EST);
+ }
+ } Action(*this, EST, D);
+ CodeGen.setAction(Action);
+ IsInTTDRegion = true;
+ // Reserve a place for the globalized memory.
+ GlobalizedRecords.emplace_back();
+ if (!KernelStaticGlobalized) {
+ KernelStaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::UndefValue::get(CGM.VoidPtrTy),
+ "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
+ }
+ emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
+ IsOffloadEntry, CodeGen);
+ IsInTTDRegion = false;
+}
+
+void CGOpenMPRuntimeGPU::emitSPMDEntryHeader(
+ CodeGenFunction &CGF, EntryFunctionState &EST,
+ const OMPExecutableDirective &D) {
+ CGBuilderTy &Bld = CGF.Builder;
+
+ // Setup BBs in entry function.
+ llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
+ EST.ExitBB = CGF.createBasicBlock(".exit");
+
+ llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
+ /*RequiresOMPRuntime=*/
+ Bld.getInt16(RequiresFullRuntime ? 1 : 0)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_spmd_kernel_init),
+ Args);
+
+ if (RequiresFullRuntime) {
+ // For data sharing, we need to initialize the stack.
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack_spmd));
+ }
+
+ CGF.EmitBranch(ExecuteBB);
+
+ CGF.EmitBlock(ExecuteBB);
+
+ IsInTargetMasterThreadRegion = true;
+}
+
+void CGOpenMPRuntimeGPU::emitSPMDEntryFooter(CodeGenFunction &CGF,
+ EntryFunctionState &EST) {
+ IsInTargetMasterThreadRegion = false;
+ if (!CGF.HaveInsertPoint())
+ return;
+
+ if (!EST.ExitBB)
+ EST.ExitBB = CGF.createBasicBlock(".exit");
+
+ llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
+ CGF.EmitBranch(OMPDeInitBB);
+
+ CGF.EmitBlock(OMPDeInitBB);
+ // Deinitialize the OMP state in the runtime; called by all active threads.
+ llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
+ CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_spmd_kernel_deinit_v2),
+ Args);
+ CGF.EmitBranch(EST.ExitBB);
+
+ CGF.EmitBlock(EST.ExitBB);
+ EST.ExitBB = nullptr;
+}
+
+// Create a unique global variable to indicate the execution mode of this target
+// region. The execution mode is either 'generic', or 'spmd' depending on the
+// target directive. This variable is picked up by the offload library to set
+// up the device appropriately before kernel launch. If the execution mode is
+// 'generic', the runtime reserves one warp for the master, otherwise, all
+// warps participate in parallel work.
+static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
+ bool Mode) {
+ auto *GVMode =
+ new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::WeakAnyLinkage,
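+                             // SPMD mode is encoded as 0, generic mode as 1.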
+ llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
+ Twine(Name, "_exec_mode"));
+ CGM.addCompilerUsedGlobal(GVMode);
+}
+
+void CGOpenMPRuntimeGPU::emitWorkerFunction(WorkerFunctionState &WST) {
+ ASTContext &Ctx = CGM.getContext();
+
+ CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
+ CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
+ WST.Loc, WST.Loc);
+ emitWorkerLoop(CGF, WST);
+ CGF.FinishFunction();
+}
+
+void CGOpenMPRuntimeGPU::emitWorkerLoop(CodeGenFunction &CGF,
+ WorkerFunctionState &WST) {
+ //
+ // The workers enter this loop and wait for parallel work from the master.
+ // When the master encounters a parallel region it sets up the work + variable
+ // arguments, and wakes up the workers. The workers first check to see if
+ // they are required for the parallel region, i.e., within the # of requested
+ // parallel threads. The activated workers load the variable arguments and
+ // execute the parallel work.
+ //
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
+ llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
+ llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
+ llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
+ llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
+
+ CGF.EmitBranch(AwaitBB);
+
+ // Workers wait for work from master.
+ CGF.EmitBlock(AwaitBB);
+ // Wait for parallel work
+ syncCTAThreads(CGF);
+
+ Address WorkFn =
+ CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
+ Address ExecStatus =
+ CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
+ CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
+ CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
+
+ // TODO: Optimize runtime initialization and pass in correct value.
+ llvm::Value *Args[] = {WorkFn.getPointer()};
+ llvm::Value *Ret =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_kernel_parallel),
+ Args);
+ Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
+
+ // On termination condition (workid == 0), exit loop.
+ llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
+ llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
+ Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
+
+ // Activate requested workers.
+ CGF.EmitBlock(SelectWorkersBB);
+ llvm::Value *IsActive =
+ Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
+ Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
+
+ // Signal start of parallel region.
+ CGF.EmitBlock(ExecuteBB);
+ // Skip initialization.
+ setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
+
+ // Process work items: outlined parallel functions.
+ for (llvm::Function *W : Work) {
+ // Try to match this outlined function.
+ llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
+
+ llvm::Value *WorkFnMatch =
+ Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
+
+ llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
+ llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
+ Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
+
+ // Execute this outlined function.
+ CGF.EmitBlock(ExecuteFNBB);
+
+ // Insert call to work function via shared wrapper. The shared
+ // wrapper takes two arguments:
+ // - the parallelism level;
+ // - the thread ID;
+ emitCall(CGF, WST.Loc, W,
+ {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
+
+ // Go to end of parallel region.
+ CGF.EmitBranch(TerminateBB);
+
+ CGF.EmitBlock(CheckNextBB);
+ }
+ // Default case: call to outlined function through pointer if the target
+ // region makes a declare target call that may contain an orphaned parallel
+ // directive.
+ auto *ParallelFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
+ /*isVarArg=*/false);
+ llvm::Value *WorkFnCast =
+ Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
+ // Insert call to work function via shared wrapper. The shared
+ // wrapper takes two arguments:
+ // - the parallelism level;
+ // - the thread ID;
+ emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
+ {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
+ // Go to end of parallel region.
+ CGF.EmitBranch(TerminateBB);
+
+ // Signal end of parallel region.
+ CGF.EmitBlock(TerminateBB);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_kernel_end_parallel),
+ llvm::None);
+ CGF.EmitBranch(BarrierBB);
+
+ // All active and inactive workers wait at a barrier after parallel region.
+ CGF.EmitBlock(BarrierBB);
+ // Barrier after parallel region.
+ syncCTAThreads(CGF);
+ CGF.EmitBranch(AwaitBB);
+
+ // Exit target region.
+ CGF.EmitBlock(ExitBB);
+ // Skip initialization.
+ clearLocThreadIdInsertPt(CGF);
+}
+
+void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
+ llvm::Constant *Addr,
+ uint64_t Size, int32_t,
+ llvm::GlobalValue::LinkageTypes) {
+ // TODO: Add support for global variables on the device after declare target
+ // support.
+ if (!isa<llvm::Function>(Addr))
+ return;
+ llvm::Module &M = CGM.getModule();
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+
+ // Get "nvvm.annotations" metadata node
+ llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
+
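+ // Mark the entry function as a kernel: !{fn, "kernel", i32 1}.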
+ llvm::Metadata *MDVals[] = {
+ llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
+ // Append metadata to nvvm.annotations
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+}
+
+void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
+ const OMPExecutableDirective &D, StringRef ParentName,
+ llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
+ if (!IsOffloadEntry) // Nothing to do.
+ return;
+
+ assert(!ParentName.empty() && "Invalid target region parent name!");
+
+ bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
+ if (Mode)
+ emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
+ CodeGen);
+ else
+ emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
+ CodeGen);
+
+ setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
+}
+
+namespace {
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+/// Enum for accessing the reserved_2 field of the ident_t struct.
+enum ModeFlagsTy : unsigned {
+ /// Bit set to 1 when in SPMD mode.
+ KMP_IDENT_SPMD_MODE = 0x01,
+ /// Bit set to 1 when a simplified runtime is used.
+ KMP_IDENT_SIMPLE_RT_MODE = 0x02,
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
+};
+
+/// Special Undefined mode: the combination of non-SPMD mode and the simple
+/// runtime.
+static const ModeFlagsTy UndefinedMode =
+ (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
+} // anonymous namespace
+
+unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
+ switch (getExecutionMode()) {
+ case EM_SPMD:
+ if (requiresFullRuntime())
+ return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
+ return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
+ case EM_NonSPMD:
+ assert(requiresFullRuntime() && "Expected full runtime.");
+ return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
+ case EM_Unknown:
+ return UndefinedMode;
+ }
+ llvm_unreachable("Unknown flags are requested.");
+}
+
+CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
+ : CGOpenMPRuntime(CGM, "_", "$") {
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ llvm_unreachable("OpenMP NVPTX can only handle device code.");
+}
+
+void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
+ ProcBindKind ProcBind,
+ SourceLocation Loc) {
+ // Do nothing in case of SPMD mode and L0 parallel.
+ if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
+ return;
+
+ CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
+}
+
+void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc) {
+ // Do nothing in case of SPMD mode and L0 parallel.
+ if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
+ return;
+
+ CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
+}
+
+void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
+ const Expr *NumTeams,
+ const Expr *ThreadLimit,
+ SourceLocation Loc) {}
+
+llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ // Emit target region as a standalone region.
+ class NVPTXPrePostActionTy : public PrePostActionTy {
+ bool &IsInParallelRegion;
+ bool PrevIsInParallelRegion;
+
+ public:
+ NVPTXPrePostActionTy(bool &IsInParallelRegion)
+ : IsInParallelRegion(IsInParallelRegion) {}
+ void Enter(CodeGenFunction &CGF) override {
+ PrevIsInParallelRegion = IsInParallelRegion;
+ IsInParallelRegion = true;
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ IsInParallelRegion = PrevIsInParallelRegion;
+ }
+ } Action(IsInParallelRegion);
+ CodeGen.setAction(Action);
+ bool PrevIsInTTDRegion = IsInTTDRegion;
+ IsInTTDRegion = false;
+ bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
+ IsInTargetMasterThreadRegion = false;
+ auto *OutlinedFun =
+ cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
+ D, ThreadIDVar, InnermostKind, CodeGen));
+ IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
+ IsInTTDRegion = PrevIsInTTDRegion;
+ if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
+ !IsInParallelRegion) {
+ llvm::Function *WrapperFun =
+ createParallelDataSharingWrapper(OutlinedFun, D);
+ WrapperFunctionsMap[OutlinedFun] = WrapperFun;
+ }
+
+ return OutlinedFun;
+}
+
+/// Get list of lastprivate variables from the teams distribute ... or
+/// teams {distribute ...} directives.
+static void
+getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
+ assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
+ "expected teams directive.");
+ const OMPExecutableDirective *Dir = &D;
+ if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
+ if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
+ Ctx,
+ D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true))) {
+ Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
+ if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
+ Dir = nullptr;
+ }
+ }
+ if (!Dir)
+ return;
+ for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
+ for (const Expr *E : C->getVarRefs())
+ Vars.push_back(getPrivateItem(E));
+ }
+}
+
+/// Get list of reduction variables from the teams ... directives.
+static void
+getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
+ assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
+ "expected teams directive.");
+ for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
+ for (const Expr *E : C->privates())
+ Vars.push_back(getPrivateItem(E));
+ }
+}
+
+llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ SourceLocation Loc = D.getBeginLoc();
+
+ const RecordDecl *GlobalizedRD = nullptr;
+ llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
+ unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
+ // Globalize teams reduction variables unconditionally in all modes.
+ if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
+ getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
+ if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
+ getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
+ if (!LastPrivatesReductions.empty()) {
+ GlobalizedRD = ::buildRecordForGlobalizedVars(
+ CGM.getContext(), llvm::None, LastPrivatesReductions,
+ MappedDeclsFields, WarpSize);
+ }
+ } else if (!LastPrivatesReductions.empty()) {
+ assert(!TeamAndReductions.first &&
+ "Previous team declaration is not expected.");
+ TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
+ std::swap(TeamAndReductions.second, LastPrivatesReductions);
+ }
+
+ // Emit target region as a standalone region.
+ class NVPTXPrePostActionTy : public PrePostActionTy {
+ SourceLocation &Loc;
+ const RecordDecl *GlobalizedRD;
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &MappedDeclsFields;
+
+ public:
+ NVPTXPrePostActionTy(
+ SourceLocation &Loc, const RecordDecl *GlobalizedRD,
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &MappedDeclsFields)
+ : Loc(Loc), GlobalizedRD(GlobalizedRD),
+ MappedDeclsFields(MappedDeclsFields) {}
+ void Enter(CodeGenFunction &CGF) override {
+ auto &Rt =
+ static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ if (GlobalizedRD) {
+ auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
+ I->getSecond().GlobalRecord = GlobalizedRD;
+ I->getSecond().MappedParams =
+ std::make_unique<CodeGenFunction::OMPMapVars>();
+ DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
+ for (const auto &Pair : MappedDeclsFields) {
+ assert(Pair.getFirst()->isCanonicalDecl() &&
+ "Expected canonical declaration");
+ Data.insert(std::make_pair(Pair.getFirst(),
+ MappedVarData(Pair.getSecond(),
+ /*IsOnePerTeam=*/true)));
+ }
+ }
+ Rt.emitGenericVarsProlog(CGF, Loc);
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
+ .emitGenericVarsEpilog(CGF);
+ }
+ } Action(Loc, GlobalizedRD, MappedDeclsFields);
+ CodeGen.setAction(Action);
+ llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
+ D, ThreadIDVar, InnermostKind, CodeGen);
+
+ return OutlinedFun;
+}
+
+void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ bool WithSPMDCheck) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
+ getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
+ return;
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I == FunctionGlobalizedDecls.end())
+ return;
+ if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
+ QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
+ QualType SecGlobalRecTy;
+
+ // Recover pointer to this function's global record. The runtime will
+ // handle the specifics of the allocation of the memory.
+ // Use actual memory size of the record including the padding
+ // for alignment purposes.
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
+ unsigned GlobalRecordSize =
+ CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
+ GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
+
+ llvm::PointerType *GlobalRecPtrTy =
+ CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
+ llvm::Value *GlobalRecCastAddr;
+ llvm::Value *IsTTD = nullptr;
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
+ llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
+ llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
+ if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *PL = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_parallel_level),
+ {RTLoc, ThreadID});
+ IsTTD = Bld.CreateIsNull(PL);
+ }
+ llvm::Value *IsSPMD = Bld.CreateIsNotNull(
+ CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
+ Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(SPMDBB);
+ Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
+ CharUnits::fromQuantity(Alignment));
+ CGF.EmitBranch(ExitBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(NonSPMDBB);
+ llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
+ if (const RecordDecl *SecGlobalizedVarsRecord =
+ I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
+ SecGlobalRecTy =
+ CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
+
+ // Recover pointer to this function's global record. The runtime will
+ // handle the specifics of the allocation of the memory.
+ // Use actual memory size of the record including the padding
+ // for alignment purposes.
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
+ unsigned GlobalRecordSize =
+ CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
+ GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
+ Size = Bld.CreateSelect(
+ IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
+ }
+ // TODO: allow the usage of shared memory to be controlled by the user;
+ // for now, default to global.
+ llvm::Value *GlobalRecordSizeArg[] = {
+ Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
+ GlobalRecordSizeArg);
+ GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, GlobalRecPtrTy);
+ CGF.EmitBlock(ExitBB);
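+ // Merge both paths: SPMD mode uses no global record (null pointer), while
+ // non-SPMD mode uses the pointer returned by the runtime.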
+ auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
+ /*NumReservedValues=*/2, "_select_stack");
+ Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
+ Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
+ GlobalRecCastAddr = Phi;
+ I->getSecond().GlobalRecordAddr = Phi;
+ I->getSecond().IsInSPMDModeFlag = IsSPMD;
+ } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
+ assert(GlobalizedRecords.back().Records.size() < 2 &&
+ "Expected less than 2 globalized records: one for target and one "
+ "for teams.");
+ unsigned Offset = 0;
+ for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
+ QualType RDTy = CGM.getContext().getRecordType(RD);
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
+ unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
+ Offset =
+ llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
+ }
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
+ Offset = llvm::alignTo(Offset, Alignment);
+ GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
+ ++GlobalizedRecords.back().RegionCounter;
+ if (GlobalizedRecords.back().Records.size() == 1) {
+ assert(KernelStaticGlobalized &&
+ "Kernel static pointer must be initialized already.");
+ auto *UseSharedMemory = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_static_kernel$is_shared");
+ UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/16, /*Signed=*/0);
+ llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
+ Address(UseSharedMemory,
+ CGM.getContext().getTypeAlignInChars(Int16Ty)),
+ /*Volatile=*/false, Int16Ty, Loc);
+ auto *StaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
+ llvm::GlobalValue::CommonLinkage, nullptr);
+ auto *RecSize = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_static_kernel$size");
+ RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ llvm::Value *Ld = CGF.EmitLoadOfScalar(
+ Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
+ CGM.getContext().getSizeType(), Loc);
+ llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ KernelStaticGlobalized, CGM.VoidPtrPtrTy);
+ llvm::Value *GlobalRecordSizeArg[] = {
+ llvm::ConstantInt::get(
+ CGM.Int16Ty,
+ getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
+ StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_get_team_static_memory),
+ GlobalRecordSizeArg);
+ GlobalizedRecords.back().Buffer = StaticGlobalized;
+ GlobalizedRecords.back().RecSize = RecSize;
+ GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
+ GlobalizedRecords.back().Loc = Loc;
+ }
+ assert(KernelStaticGlobalized && "Global address must be set already.");
+ Address FrameAddr = CGF.EmitLoadOfPointer(
+ Address(KernelStaticGlobalized, CGM.getPointerAlign()),
+ CGM.getContext()
+ .getPointerType(CGM.getContext().VoidPtrTy)
+ .castAs<PointerType>());
+ llvm::Value *GlobalRecValue =
+ Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
+ I->getSecond().GlobalRecordAddr = GlobalRecValue;
+ I->getSecond().IsInSPMDModeFlag = nullptr;
+ GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
+ } else {
+ // TODO: allow the usage of shared memory to be controlled by the user;
+ // for now, default to global.
+ bool UseSharedMemory =
+ IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
+ llvm::Value *GlobalRecordSizeArg[] = {
+ llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
+ CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(),
+ IsInTTDRegion ? OMPRTL___kmpc_data_sharing_push_stack
+ : OMPRTL___kmpc_data_sharing_coalesced_push_stack),
+ GlobalRecordSizeArg);
+ GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, GlobalRecPtrTy);
+ I->getSecond().GlobalRecordAddr = GlobalRecValue;
+ I->getSecond().IsInSPMDModeFlag = nullptr;
+ }
+ LValue Base =
+ CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
+
+ // Emit the "global alloca" which is a GEP from the global declaration
+ // record using the pointer returned by the runtime.
+ LValue SecBase;
+ decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
+ if (IsTTD) {
+ SecIt = I->getSecond().SecondaryLocalVarData->begin();
+ llvm::PointerType *SecGlobalRecPtrTy =
+ CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
+ SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
+ SecGlobalRecTy);
+ }
+ for (auto &Rec : I->getSecond().LocalVarData) {
+ bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
+ llvm::Value *ParValue;
+ if (EscapedParam) {
+ const auto *VD = cast<VarDecl>(Rec.first);
+ LValue ParLVal =
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
+ ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
+ }
+ LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
+ // Emit VarAddr based on the lane ID if required.
+ QualType VarTy;
+ if (Rec.second.IsOnePerTeam) {
+ VarTy = Rec.second.FD->getType();
+ } else {
+ llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
+ VarAddr.getAddress(CGF).getPointer(),
+ {Bld.getInt32(0), getNVPTXLaneID(CGF)});
+ VarTy =
+ Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
+ VarAddr = CGF.MakeAddrLValue(
+ Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
+ AlignmentSource::Decl);
+ }
+ Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
+ assert(I->getSecond().IsInSPMDModeFlag &&
+ "Expected unknown execution mode or required SPMD check.");
+ if (IsTTD) {
+ assert(SecIt->second.IsOnePerTeam &&
+ "Secondary glob data must be one per team.");
+ LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
+ VarAddr.setAddress(
+ Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
+ VarAddr.getPointer(CGF)),
+ VarAddr.getAlignment()));
+ Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
+ }
+ Address GlobalPtr = Rec.second.PrivateAddr;
+ Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
+ Rec.second.PrivateAddr = Address(
+ Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
+ LocalAddr.getPointer(), GlobalPtr.getPointer()),
+ LocalAddr.getAlignment());
+ }
+ if (EscapedParam) {
+ const auto *VD = cast<VarDecl>(Rec.first);
+ CGF.EmitStoreOfScalar(ParValue, VarAddr);
+ I->getSecond().MappedParams->setVarAddr(CGF, VD,
+ VarAddr.getAddress(CGF));
+ }
+ if (IsTTD)
+ ++SecIt;
+ }
+ }
+ for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
+ // Recover pointer to this function's global record. The runtime will
+ // handle the specifics of the allocation of the memory.
+ // Use actual memory size of the record including the padding
+ // for alignment purposes.
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::Value *Size = CGF.getTypeSize(VD->getType());
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
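+ // Round the size up to a multiple of the declared alignment:
+ // Size = ((Size + Align - 1) / Align) * Align.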
+ Size = Bld.CreateNUWAdd(
+ Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
+ Size = Bld.CreateUDiv(Size, AlignVal);
+ Size = Bld.CreateNUWMul(Size, AlignVal);
+ // TODO: allow the usage of shared memory to be controlled by the user;
+ // for now, default to global.
+ llvm::Value *GlobalRecordSizeArg[] = {
+ Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
+ GlobalRecordSizeArg);
+ llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
+ LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
+ CGM.getContext().getDeclAlign(VD),
+ AlignmentSource::Decl);
+ I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
+ Base.getAddress(CGF));
+ I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
+ }
+ I->getSecond().MappedParams->apply(CGF);
+}
+
+void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
+ bool WithSPMDCheck) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
+ getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
+ return;
+
+ const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I != FunctionGlobalizedDecls.end()) {
+ I->getSecond().MappedParams->restore(CGF);
+ if (!CGF.HaveInsertPoint())
+ return;
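+ // Pop the variable-length allocations in the reverse order they were
+ // pushed.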
+ for (llvm::Value *Addr :
+ llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
+ Addr);
+ }
+ if (I->getSecond().GlobalRecordAddr) {
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
+ llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
+ Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(NonSPMDBB);
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
+ CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
+ CGF.EmitBlock(ExitBB);
+ } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
+ assert(GlobalizedRecords.back().RegionCounter > 0 &&
+ "region counter must be > 0.");
+ --GlobalizedRecords.back().RegionCounter;
+ // Emit the restore function only in the target region.
+ if (GlobalizedRecords.back().RegionCounter == 0) {
+ QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/16, /*Signed=*/0);
+ llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
+ Address(GlobalizedRecords.back().UseSharedMemory,
+ CGM.getContext().getTypeAlignInChars(Int16Ty)),
+ /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
+ llvm::Value *Args[] = {
+ llvm::ConstantInt::get(
+ CGM.Int16Ty,
+ getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
+ IsInSharedMemory};
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_restore_team_static_memory),
+ Args);
+ }
+ } else {
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
+ I->getSecond().GlobalRecordAddr);
+ }
+ }
+ }
+}
+
+void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ SourceLocation Loc,
+ llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars) {
+ if (!CGF.HaveInsertPoint())
+ return;
+
+ Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+ OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
+ emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
+}
+
+void CGOpenMPRuntimeGPU::emitParallelCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
+ if (!CGF.HaveInsertPoint())
+ return;
+
+ if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
+ emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
+ else
+ emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
+}
+
+void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
+ llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
+
+ // Give the outlined function internal linkage; it is only referenced from
+ // this translation unit.
+ Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
+
+ // Ensure we do not inline the function. This is trivially true for the ones
+ // passed to __kmpc_fork_call, but the ones called in serialized regions
+ // could be inlined. This is not perfect, but it is closer to the invariant
+ // we want, namely that every data environment starts with a new function.
+ // TODO: We should pass the if condition to the runtime function and do the
+ // handling there. Much cleaner code.
+ cast<llvm::Function>(OutlinedFn)->addFnAttr(llvm::Attribute::NoInline);
+
+ Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ // ThreadId for serialized parallel regions is 0.
+ Address ThreadIDAddr = ZeroAddr;
+ auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+
+ Address ZeroAddr =
+ CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".bound.zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+ OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
+ emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
+ };
+ auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ RegionCodeGenTy RCG(CodeGen);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *Args[] = {RTLoc, ThreadID};
+
+ NVPTXActionTy Action(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
+ Args);
+ RCG.setAction(Action);
+ RCG(CGF);
+ };
+
+ auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::Function *WFn = WrapperFunctionsMap[Fn];
+ assert(WFn && "Wrapper function does not exist!");
+ llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
+
+ // Prepare for parallel region. Indicate the outlined function.
+ llvm::Value *Args[] = {ID};
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_kernel_prepare_parallel),
+ Args);
+
+ // Create a private scope that will globalize the arguments
+ // passed from the outside of the target region.
+ CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
+
+ // There's something to share.
+ if (!CapturedVars.empty()) {
+ // Prepare the buffer of argument references that will be passed to the
+ // workers.
+ Address SharedArgs =
+ CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
+ llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
+
+ llvm::Value *DataSharingArgs[] = {
+ SharedArgsPtr,
+ llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_begin_sharing_variables),
+ DataSharingArgs);
+
+ // Store variable address in a list of references to pass to workers.
+ unsigned Idx = 0;
+ ASTContext &Ctx = CGF.getContext();
+ Address SharedArgListAddress = CGF.EmitLoadOfPointer(
+ SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
+ .castAs<PointerType>());
+ for (llvm::Value *V : CapturedVars) {
+ Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
+ llvm::Value *PtrV;
+ if (V->getType()->isIntegerTy())
+ PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
+ else
+ PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
+ CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
+ Ctx.getPointerType(Ctx.VoidPtrTy));
+ ++Idx;
+ }
+ }
+
+ // Activate workers. This barrier is used by the master to signal
+ // work for the workers.
+ syncCTAThreads(CGF);
+
+ // OpenMP [2.5, Parallel Construct, p.49]
+ // There is an implied barrier at the end of a parallel region. After the
+ // end of a parallel region, only the master thread of the team resumes
+ // execution of the enclosing task region.
+ //
+ // The master waits at this barrier until all workers are done.
+ syncCTAThreads(CGF);
+
+ if (!CapturedVars.empty())
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_sharing_variables));
+
+ // Remember for post-processing in worker loop.
+ Work.emplace_back(WFn);
+ };
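+ // The master-side handshake emitted by L0ParallelGen above is, in
+ // pseudocode (an illustrative sketch, not the literal IR):
+ //   __kmpc_kernel_prepare_parallel(WrapperFn);
+ //   __kmpc_begin_sharing_variables(&Args, NumArgs); // only with captures
+ //   barrier; // release the workers
+ //   barrier; // wait until the workers are done
+ //   __kmpc_end_sharing_variables();                 // only with captures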
+
+ auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
+ if (IsInParallelRegion) {
+ SeqGen(CGF, Action);
+ } else if (IsInTargetMasterThreadRegion) {
+ L0ParallelGen(CGF, Action);
+ } else {
+ // Check for master and then parallelism:
+ // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
+ // Serialized execution.
+ // } else {
+ // Worker call.
+ // }
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
+ llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
+ llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
+ llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
+ llvm::Value *IsSPMD = Bld.CreateIsNotNull(
+ CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
+ Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ParallelCheckBB);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *PL = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_parallel_level),
+ {RTLoc, ThreadID});
+ llvm::Value *Res = Bld.CreateIsNotNull(PL);
+ Bld.CreateCondBr(Res, SeqBB, MasterBB);
+ CGF.EmitBlock(SeqBB);
+ SeqGen(CGF, Action);
+ CGF.EmitBranch(ExitBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(MasterBB);
+ L0ParallelGen(CGF, Action);
+ CGF.EmitBranch(ExitBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ // Emit the continuation block for code after the if.
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+ }
+ };
+
+ if (IfCond) {
+ emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ RegionCodeGenTy ThenRCG(LNParallelGen);
+ ThenRCG(CGF);
+ }
+}
+
+void CGOpenMPRuntimeGPU::emitSPMDParallelCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
+ // Just call the outlined function to execute the parallel region.
+ // OutlinedFn(&GTid, &zero, CapturedStruct);
+ //
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+
+ Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ // ThreadId for serialized parallel regions is 0.
+ Address ThreadIDAddr = ZeroAddr;
+ auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+
+ Address ZeroAddr =
+ CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".bound.zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+ OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
+ emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
+ };
+ auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ RegionCodeGenTy RCG(CodeGen);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *Args[] = {RTLoc, ThreadID};
+
+ NVPTXActionTy Action(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
+ Args);
+ RCG.setAction(Action);
+ RCG(CGF);
+ };
+
+ if (IsInTargetMasterThreadRegion) {
+ // In the target master thread region we need to use the real thread id.
+ ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
+ RegionCodeGenTy RCG(CodeGen);
+ RCG(CGF);
+ } else {
+ // If we are not in the target region, this is definitely L2 parallelism or
+ // higher, because in SPMD mode we always have an L1 parallel level, so we
+ // don't need to check for orphaned directives.
+ RegionCodeGenTy RCG(SeqGen);
+ RCG(CGF);
+ }
+}
+
+void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
+ // Always emit simple barriers!
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
+ // This function does not use parameters, so we can emit just default values.
+ llvm::Value *Args[] = {
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(getIdentTyPointerTy())),
+ llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
+ Args);
+}
+
+void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool,
+ bool) {
+ // Always emit simple barriers!
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_barrier(loc, thread_id);
+ unsigned Flags = getDefaultFlagsForBarriers(Kind);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
+ getThreadID(CGF, Loc)};
+
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_barrier),
+ Args);
+}
+
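+/// Emit a critical region that only one thread of the team executes at a
+/// time. The emitted control flow is logically equivalent to the following
+/// per-thread loop (an illustrative sketch, not the literal IR):
+///   for (uint32_t Counter = 0; Counter < TeamWidth; ++Counter) {
+///     if (ThreadID == Counter)
+///       <critical region body>
+///     __kmpc_syncwarp(Mask); // reconverge before the next iteration
+///   }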
+void CGOpenMPRuntimeGPU::emitCriticalRegion(
+ CodeGenFunction &CGF, StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
+ const Expr *Hint) {
+ llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
+ llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
+ llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
+
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+
+ // Get the mask of active threads in the warp.
+ llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
+ // Fetch team-local id of the thread.
+ llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
+
+ // Get the width of the team.
+ llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
+
+ // Initialize the counter variable for the loop.
+ QualType Int32Ty =
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
+ Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
+ LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
+ CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
+ /*isInit=*/true);
+
+ // Block checks if loop counter exceeds upper bound.
+ CGF.EmitBlock(LoopBB);
+ llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
+ llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
+ CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
+
+ // This block tests which thread should execute the region, and which
+ // threads should go straight to the synchronisation point.
+ CGF.EmitBlock(TestBB);
+ CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
+ llvm::Value *CmpThreadToCounter =
+ CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
+ CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
+
+ // Block emits the body of the critical region.
+ CGF.EmitBlock(BodyBB);
+
+ // Output the critical statement.
+ CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
+ Hint);
+
+ // After the body of the critical region, the single executing thread falls
+ // through to the synchronisation point, where all threads in the current
+ // team reconverge before the counter is incremented and control returns to
+ // the loop.
+ CGF.EmitBlock(SyncBB);
+ // Reconverge active threads in the warp.
+ (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_syncwarp),
+ Mask);
+
+ llvm::Value *IncCounterVal =
+ CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
+ CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
+ CGF.EmitBranch(LoopBB);
+
+ // Block that is reached when all threads in the team complete the region.
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+/// Cast value to the specified type.
+static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
+ QualType ValTy, QualType CastTy,
+ SourceLocation Loc) {
+ assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
+ "Cast type must be sized.");
+ assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
+ "Val type must be sized.");
+ llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
+ if (ValTy == CastTy)
+ return Val;
+ if (CGF.getContext().getTypeSizeInChars(ValTy) ==
+ CGF.getContext().getTypeSizeInChars(CastTy))
+ return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
+ if (CastTy->isIntegerType() && ValTy->isIntegerType())
+ return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
+ CastTy->hasSignedIntegerRepresentation());
+ Address CastItem = CGF.CreateMemTemp(CastTy);
+ Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
+ CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo());
+ return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo());
+}
+
+/// This function creates calls to one of two shuffle functions to copy
+/// variables between lanes in a warp.
+static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
+ llvm::Value *Elem,
+ QualType ElemType,
+ llvm::Value *Offset,
+ SourceLocation Loc) {
+ CodeGenModule &CGM = CGF.CGM;
+ CGBuilderTy &Bld = CGF.Builder;
+ CGOpenMPRuntimeGPU &RT =
+ *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
+ llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
+
+ CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
+ assert(Size.getQuantity() <= 8 &&
+ "Unsupported bitwidth in shuffle instruction.");
+
+ RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
+ ? OMPRTL___kmpc_shuffle_int32
+ : OMPRTL___kmpc_shuffle_int64;
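+ // For example (illustrative), a 2-byte element is widened to a 32-bit
+ // integer below, moved with __kmpc_shuffle_int32, and truncated back by the
+ // final castValueToType.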
+
+ // Cast all types to 32- or 64-bit values before calling shuffle routines.
+ QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
+ Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
+ llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
+ llvm::Value *WarpSize =
+ Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
+
+ llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
+ {ElemCast, Offset, WarpSize});
+
+ return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
+}
+
+static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
+ Address DestAddr, QualType ElemType,
+ llvm::Value *Offset, SourceLocation Loc) {
+ CGBuilderTy &Bld = CGF.Builder;
+
+ CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
+ // Create the loop over the big sized data.
+ // ptr = (void*)Elem;
+ // ptrEnd = (void*) Elem + 1;
+ // Step = 8;
+ // while (ptr + Step < ptrEnd)
+ // shuffle((int64_t)*ptr);
+ // Step = 4;
+ // while (ptr + Step < ptrEnd)
+ // shuffle((int32_t)*ptr);
+ // ...
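+ // For example (illustrative), a 14-byte element is moved as one 64-bit,
+ // one 32-bit, and one 16-bit shuffle: 14 = 8 + 4 + 2.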
+ Address ElemPtr = DestAddr;
+ Address Ptr = SrcAddr;
+ Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
+ for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
+ if (Size < CharUnits::fromQuantity(IntSize))
+ continue;
+ QualType IntType = CGF.getContext().getIntTypeForBitwidth(
+ CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
+ /*Signed=*/1);
+ llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
+ Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
+ ElemPtr =
+ Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
+ if (Size.getQuantity() / IntSize > 1) {
+ llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
+ llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
+ CGF.EmitBlock(PreCondBB);
+ llvm::PHINode *PhiSrc =
+ Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
+ PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
+ llvm::PHINode *PhiDest =
+ Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
+ PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
+ Ptr = Address(PhiSrc, Ptr.getAlignment());
+ ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
+ llvm::Value *PtrDiff = Bld.CreatePtrDiff(
+ PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Ptr.getPointer(), CGF.VoidPtrTy));
+ Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
+ ThenBB, ExitBB);
+ CGF.EmitBlock(ThenBB);
+ llvm::Value *Res = createRuntimeShuffleFunction(
+ CGF,
+ CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo()),
+ IntType, Offset, Loc);
+ CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo());
+ Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
+ Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
+ PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
+ PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
+ CGF.EmitBranch(PreCondBB);
+ CGF.EmitBlock(ExitBB);
+ } else {
+ llvm::Value *Res = createRuntimeShuffleFunction(
+ CGF,
+ CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo()),
+ IntType, Offset, Loc);
+ CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo());
+ Ptr = Bld.CreateConstGEP(Ptr, 1);
+ ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
+ }
+ Size = Size % IntSize;
+ }
+}
+
+namespace {
+enum CopyAction : unsigned {
+ // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
+ // the warp using shuffle instructions.
+ RemoteLaneToThread,
+ // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
+ ThreadCopy,
+ // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
+ ThreadToScratchpad,
+ // ScratchpadToThread: Copy from a scratchpad array in global memory
+ // containing team-reduced data to a thread's stack.
+ ScratchpadToThread,
+};
+} // namespace
+
+struct CopyOptionsTy {
+ llvm::Value *RemoteLaneOffset;
+ llvm::Value *ScratchpadIndex;
+ llvm::Value *ScratchpadWidth;
+};
+
+/// Emit instructions to copy a Reduce list, which contains partially
+/// aggregated values, in the specified direction.
+static void emitReductionListCopy(
+ CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
+ ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
+ CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
+
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &C = CGM.getContext();
+ CGBuilderTy &Bld = CGF.Builder;
+
+ llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
+ llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
+ llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
+
+ // Iterate, element by element, through the source Reduce list and make a
+ // copy.
+ unsigned Idx = 0;
+ unsigned Size = Privates.size();
+ for (const Expr *Private : Privates) {
+ Address SrcElementAddr = Address::invalid();
+ Address DestElementAddr = Address::invalid();
+ Address DestElementPtrAddr = Address::invalid();
+ // Should we shuffle in an element from a remote lane?
+ bool ShuffleInElement = false;
+ // Set to true to update the pointer in the dest Reduce list to a
+ // newly created element.
+ bool UpdateDestListPtr = false;
+ // Increment the src or dest pointer to the scratchpad, for each
+ // new element.
+ bool IncrScratchpadSrc = false;
+ bool IncrScratchpadDest = false;
+
+ switch (Action) {
+ case RemoteLaneToThread: {
+ // Step 1.1: Get the address for the src element in the Reduce list.
+ Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
+
+ // Step 1.2: Create a temporary to store the element in the destination
+ // Reduce list.
+ DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
+ DestElementAddr =
+ CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
+ ShuffleInElement = true;
+ UpdateDestListPtr = true;
+ break;
+ }
+ case ThreadCopy: {
+ // Step 1.1: Get the address for the src element in the Reduce list.
+ Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
+
+ // Step 1.2: Get the address for dest element. The destination
+ // element has already been created on the thread's stack.
+ DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
+ DestElementAddr = CGF.EmitLoadOfPointer(
+ DestElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
+ break;
+ }
+ case ThreadToScratchpad: {
+ // Step 1.1: Get the address for the src element in the Reduce list.
+ Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
+
+ // Step 1.2: Get the address for dest element:
+ // address = base + index * ElementSizeInChars.
+ llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
+ llvm::Value *CurrentOffset =
+ Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
+ llvm::Value *ScratchPadElemAbsolutePtrVal =
+ Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
+ ScratchPadElemAbsolutePtrVal =
+ Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
+ DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
+ C.getTypeAlignInChars(Private->getType()));
+ IncrScratchpadDest = true;
+ break;
+ }
+ case ScratchpadToThread: {
+ // Step 1.1: Get the address for the src element in the scratchpad.
+ // address = base + index * ElementSizeInChars.
+ llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
+ llvm::Value *CurrentOffset =
+ Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
+ llvm::Value *ScratchPadElemAbsolutePtrVal =
+ Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
+ ScratchPadElemAbsolutePtrVal =
+ Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
+ SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
+ C.getTypeAlignInChars(Private->getType()));
+ IncrScratchpadSrc = true;
+
+ // Step 1.2: Create a temporary to store the element in the destination
+ // Reduce list.
+ DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
+ DestElementAddr =
+ CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
+ UpdateDestListPtr = true;
+ break;
+ }
+ }
+
+ // Regardless of the direction of the copy, emit the load of the src
+ // element, as it is required in all cases.
+ SrcElementAddr = Bld.CreateElementBitCast(
+ SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
+ DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
+ SrcElementAddr.getElementType());
+
+ // Now that all active lanes have read the element in the
+ // Reduce list, shuffle over the value from the remote lane.
+ if (ShuffleInElement) {
+ shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
+ RemoteLaneOffset, Private->getExprLoc());
+ } else {
+ switch (CGF.getEvaluationKind(Private->getType())) {
+ case TEK_Scalar: {
+ llvm::Value *Elem = CGF.EmitLoadOfScalar(
+ SrcElementAddr, /*Volatile=*/false, Private->getType(),
+ Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo());
+ // Store the source element value to the dest element address.
+ CGF.EmitStoreOfScalar(
+ Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
+ LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
+ break;
+ }
+ case TEK_Complex: {
+ CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
+ CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
+ Private->getExprLoc());
+ CGF.EmitStoreOfComplex(
+ Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
+ /*isInit=*/false);
+ break;
+ }
+ case TEK_Aggregate:
+ CGF.EmitAggregateCopy(
+ CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
+ CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
+ Private->getType(), AggValueSlot::DoesNotOverlap);
+ break;
+ }
+ }
+
+ // Step 3.1: Modify reference in dest Reduce list as needed.
+ // Modifying the reference in Reduce list to point to the newly
+ // created element. The element is live in the current function
+ // scope and that of functions it invokes (i.e., reduce_function).
+ // RemoteReduceData[i] = (void*)&RemoteElem
+ if (UpdateDestListPtr) {
+ CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
+ DestElementAddr.getPointer(), CGF.VoidPtrTy),
+ DestElementPtrAddr, /*Volatile=*/false,
+ C.VoidPtrTy);
+ }
+
+ // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
+ // address of the next element in scratchpad memory, unless we're currently
+ // processing the last one. Memory alignment is also taken care of here.
+ if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
+ llvm::Value *ScratchpadBasePtr =
+ IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
+ llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
+ ScratchpadBasePtr = Bld.CreateNUWAdd(
+ ScratchpadBasePtr,
+ Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
+
+ // Take care of global memory alignment for performance
+ ScratchpadBasePtr = Bld.CreateNUWSub(
+ ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
+ ScratchpadBasePtr = Bld.CreateUDiv(
+ ScratchpadBasePtr,
+ llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
+ ScratchpadBasePtr = Bld.CreateNUWAdd(
+ ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
+ ScratchpadBasePtr = Bld.CreateNUWMul(
+ ScratchpadBasePtr,
+ llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
+
+ if (IncrScratchpadDest)
+ DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
+ else /* IncrScratchpadSrc = true */
+ SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
+ }
+
+ ++Idx;
+ }
+}
+
+/// This function emits a helper that gathers Reduce lists from the first
+/// lane of every active warp to lanes in the first warp.
+///
+/// void inter_warp_copy_func(void *reduce_data, int num_warps)
+/// shared smem[warp_size];
+/// For all data entries D in reduce_data:
+/// sync
+/// If (I am the first lane in each warp)
+/// Copy my local D to smem[warp_id]
+/// sync
+/// if (I am the first warp)
+/// Copy smem[thread_id] to my local D
+static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
+ ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ llvm::Module &M = CGM.getModule();
+
+ // ReduceList: thread local Reduce list.
+ // At the stage of the computation when this function is called, partially
+ // aggregated values reside in the first lane of every active warp.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // NumWarps: number of warps active in the parallel region. This could
+ // be smaller than 32 (max warps in a CTA) for partial block reduction.
+ ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.getIntTypeForBitwidth(32, /* Signed */ true),
+ ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&ReduceListArg);
+ Args.push_back(&NumWarpsArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
+ llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_inter_warp_copy_func", &M);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ // This array is used as a medium to transfer, one reduce element at a time,
+ // the data from the first lane of every warp to lanes in the first warp
+ // in order to perform the final step of a reduction in a parallel region
+ // (reduction across warps). The array is placed in NVPTX __shared__ memory
+ // for reduced latency, as well as to have a distinct copy for concurrently
+ // executing target regions. The array is declared with weak linkage so
+ // as to be shared across compilation units.
+ StringRef TransferMediumName =
+ "__openmp_nvptx_data_transfer_temporary_storage";
+ llvm::GlobalVariable *TransferMedium =
+ M.getGlobalVariable(TransferMediumName);
+ unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
+ if (!TransferMedium) {
+ auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
+ unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
+ TransferMedium = new llvm::GlobalVariable(
+ M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
+ llvm::UndefValue::get(Ty), TransferMediumName,
+ /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
+ SharedAddressSpace);
+ CGM.addCompilerUsedGlobal(TransferMedium);
+ }
+
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ // Get the CUDA thread id of the current OpenMP thread on the GPU.
+ llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
+ // nvptx_lane_id = nvptx_id % warpsize
+ llvm::Value *LaneID = getNVPTXLaneID(CGF);
+ // nvptx_warp_id = nvptx_id / warpsize
+ llvm::Value *WarpID = getNVPTXWarpID(CGF);
+
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ Address LocalReduceList(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(
+ AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
+ LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
+ CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
+ CGF.getPointerAlign());
+
+ unsigned Idx = 0;
+ for (const Expr *Private : Privates) {
+ //
+ // Warp master copies reduce element to transfer medium in __shared__
+ // memory.
+ //
+ unsigned RealTySize =
+ C.getTypeSizeInChars(Private->getType())
+ .alignTo(C.getTypeAlignInChars(Private->getType()))
+ .getQuantity();
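+ // The loop below transfers the element in power-of-two chunks; e.g. a
+ // 6-byte element (illustrative) goes as one 4-byte chunk followed by one
+ // 2-byte chunk.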
+ for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
+ unsigned NumIters = RealTySize / TySize;
+ if (NumIters == 0)
+ continue;
+ QualType CType = C.getIntTypeForBitwidth(
+ C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
+ llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
+ CharUnits Align = CharUnits::fromQuantity(TySize);
+ llvm::Value *Cnt = nullptr;
+ Address CntAddr = Address::invalid();
+ llvm::BasicBlock *PrecondBB = nullptr;
+ llvm::BasicBlock *ExitBB = nullptr;
+ if (NumIters > 1) {
+ CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
+ CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
+ /*Volatile=*/false, C.IntTy);
+ PrecondBB = CGF.createBasicBlock("precond");
+ ExitBB = CGF.createBasicBlock("exit");
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(PrecondBB);
+ Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
+ llvm::Value *Cmp =
+ Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
+ Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
+ CGF.EmitBlock(BodyBB);
+ }
+ // kmpc_barrier.
+ CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
+
+ // if (lane_id == 0)
+ llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
+ Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
+ CGF.EmitBlock(ThenBB);
+
+ // Reduce element = LocalReduceList[i]
+ Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
+ llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
+ ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ // elemptr = ((CopyType*)(elemptrptr)) + I
+ Address ElemPtr = Address(ElemPtrPtr, Align);
+ ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
+ if (NumIters > 1) {
+ ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
+ ElemPtr.getAlignment());
+ }
+
+ // Get pointer to location in transfer medium.
+ // MediumPtr = &medium[warp_id]
+ llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
+ TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
+ Address MediumPtr(MediumPtrVal, Align);
+ // Casting to actual data type.
+ // MediumPtr = (CopyType*)MediumPtrAddr;
+ MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
+
+ // elem = *elemptr
+ //*MediumPtr = elem
+ llvm::Value *Elem = CGF.EmitLoadOfScalar(
+ ElemPtr, /*Volatile=*/false, CType, Loc,
+ LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
+ // Store the source element value to the dest element address.
+ CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo());
+
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(ElseBB);
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(MergeBB);
+
+ // kmpc_barrier.
+ CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+
+ //
+ // Warp 0 copies reduce element from transfer medium.
+ //
+ llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
+
+ Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
+ llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
+ AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
+
+ // Up to 32 threads in warp 0 are active.
+ llvm::Value *IsActiveThread =
+ Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
+ Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
+
+ CGF.EmitBlock(W0ThenBB);
+
+ // SrcMediumPtr = &medium[tid]
+ llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
+ TransferMedium,
+ {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
+ Address SrcMediumPtr(SrcMediumPtrVal, Align);
+ // SrcMediumVal = *SrcMediumPtr;
+ SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
+
+ // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
+ Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
+ llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
+ TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
+ Address TargetElemPtr = Address(TargetElemPtrVal, Align);
+ TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
+ if (NumIters > 1) {
+ TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
+ TargetElemPtr.getAlignment());
+ }
+
+ // *TargetElemPtr = SrcMediumVal;
+ llvm::Value *SrcMediumValue =
+ CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
+ CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
+ CType);
+ Bld.CreateBr(W0MergeBB);
+
+ CGF.EmitBlock(W0ElseBB);
+ Bld.CreateBr(W0MergeBB);
+
+ CGF.EmitBlock(W0MergeBB);
+
+ if (NumIters > 1) {
+ Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
+ CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
+ CGF.EmitBranch(PrecondBB);
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ExitBB);
+ }
+ RealTySize %= TySize;
+ }
+ ++Idx;
+ }
+
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// Emit a helper that reduces data across two OpenMP threads (lanes)
+/// in the same warp. It uses shuffle instructions to copy over data from
+/// a remote lane's stack. The reduction algorithm performed is specified
+/// by the fourth parameter.
+///
+/// Algorithm Versions.
+/// Full Warp Reduce (argument value 0):
+/// This algorithm assumes that all 32 lanes are active and gathers
+/// data from these 32 lanes, producing a single resultant value.
+/// Contiguous Partial Warp Reduce (argument value 1):
+/// This algorithm assumes that only a *contiguous* subset of lanes
+/// are active. This happens for the last warp in a parallel region
+/// when the user-specified num_threads is not an integer multiple of
+/// 32. This contiguous subset always starts with the zeroth lane.
+/// Partial Warp Reduce (argument value 2):
+/// This algorithm gathers data from any number of lanes at any position.
+/// All reduced values are stored in the lowest possible lane. The set
+/// of problems each algorithm addresses is a superset of those
+/// addressable by algorithms with a lower version number. Overhead
+/// increases as the algorithm version increases.
+///
+/// Terminology
+/// Reduce element:
+/// Reduce element refers to the individual data field with primitive
+/// data types to be combined and reduced across threads.
+/// Reduce list:
+/// Reduce list refers to a collection of local, thread-private
+/// reduce elements.
+/// Remote Reduce list:
+/// Remote Reduce list refers to a collection of remote (relative to
+/// the current thread) reduce elements.
+///
+/// We distinguish between three states of threads that are important to
+/// the implementation of this function.
+/// Alive threads:
+/// Threads in a warp executing the SIMT instruction, as distinguished from
+/// threads that are inactive due to divergent control flow.
+/// Active threads:
+/// The minimal set of threads that has to be alive upon entry to this
+/// function. The computation is correct iff active threads are alive.
+/// Some threads are alive but they are not active because they do not
+/// contribute to the computation in any useful manner. Turning them off
+/// may introduce control flow overheads without any tangible benefits.
+/// Effective threads:
+/// In order to comply with the argument requirements of the shuffle
+/// function, we must keep all lanes holding data alive. But at most
+/// half of them perform value aggregation; we refer to this half of
+/// threads as effective. The other half simply hands off its data.
+///
+/// Procedure
+/// Value shuffle:
+/// In this step active threads transfer data from higher lane positions
+/// in the warp to lower lane positions, creating Remote Reduce list.
+/// Value aggregation:
+/// In this step, effective threads combine their thread local Reduce list
+/// with Remote Reduce list and store the result in the thread local
+/// Reduce list.
+/// Value copy:
+/// In this step, we deal with the contiguity assumption made by the
+/// contiguous partial warp reduce algorithm (argument value 1). When we
+/// have an odd number of lanes active, say 2k+1, only k threads will be
+/// effective and therefore k new values will be produced. However, the
+/// Reduce list owned by the (2k+1)th thread is ignored in the value
+/// aggregation. Therefore we copy the Reduce list from the (2k+1)th lane
+/// to the (k+1)th lane so that the contiguity assumption still holds.
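+///
+/// For a full-warp reduction the caller typically invokes this helper with
+/// halving offsets (16, 8, 4, 2, 1 for a 32-lane warp), so that lane 0 ends
+/// up holding the fully reduced Reduce list (a sketch of the usual driver
+/// loop, not something this function enforces).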
+static llvm::Function *emitShuffleAndReduceFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+
+ // Thread local Reduce list used to host the values of data to be reduced.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Current lane id; could be logical.
+ ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
+ ImplicitParamDecl::Other);
+ // Offset of the remote source lane relative to the current lane.
+ ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.ShortTy, ImplicitParamDecl::Other);
+ // Algorithm version. This is expected to be known at compile time.
+ ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.ShortTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&ReduceListArg);
+ Args.push_back(&LaneIDArg);
+ Args.push_back(&RemoteLaneOffsetArg);
+ Args.push_back(&AlgoVerArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ Address LocalReduceList(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
+ C.VoidPtrTy, SourceLocation()),
+ CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
+ CGF.getPointerAlign());
+
+ Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
+ llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
+ AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
+
+ Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
+ llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
+ AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
+
+ Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
+ llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
+ AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
+
+ // Create a local thread-private variable to host the Reduce list
+ // from a remote lane.
+ Address RemoteReduceList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
+
+ // This loop iterates through the list of reduce elements and copies,
+ // element by element, from a remote lane in the warp to RemoteReduceList,
+ // hosted on the thread's stack.
+ emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
+ LocalReduceList, RemoteReduceList,
+ {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
+ /*ScratchpadIndex=*/nullptr,
+ /*ScratchpadWidth=*/nullptr});
+
+ // The actions to be performed on the Remote Reduce list depend on the
+ // algorithm version.
+ //
+ // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
+ // LaneId % 2 == 0 && Offset > 0):
+ // do the reduction value aggregation
+ //
+ // The thread local variable Reduce list is mutated in place to host the
+ // reduced data, which is the aggregated value produced from local and
+ // remote lanes.
+ //
+ // Note that AlgoVer is expected to be a constant integer known at compile
+ // time.
+ // When AlgoVer==0, the first conjunct is true at compile time, making the
+ // entire predicate true.
+ // When AlgoVer==1, only the second part of the second conjunct must be
+ // evaluated at runtime; the other conjuncts fold to false at compile time.
+ // When AlgoVer==2, only the second part of the third conjunct must be
+ // evaluated at runtime; the other conjuncts fold to false at compile time.
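+ // For example (illustrative), with AlgoVer==1, five contiguous active lanes
+ // and Offset==2: lanes 0 and 1 aggregate the values shuffled down from
+ // lanes 2 and 3, and the copy step further below moves lane 4's Reduce
+ // list down to lane 2 so the surviving lanes stay contiguous.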
+ llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
+
+ llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
+ llvm::Value *CondAlgo1 = Bld.CreateAnd(
+ Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
+
+ llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
+ llvm::Value *CondAlgo2 = Bld.CreateAnd(
+ Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
+ CondAlgo2 = Bld.CreateAnd(
+ CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
+
+ llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
+ CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
+
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
+ Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
+
+ CGF.EmitBlock(ThenBB);
+ // reduce_function(LocalReduceList, RemoteReduceList)
+ llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ LocalReduceList.getPointer(), CGF.VoidPtrTy);
+ llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ RemoteReduceList.getPointer(), CGF.VoidPtrTy);
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(ElseBB);
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(MergeBB);
+
+ // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
+ // Reduce list.
+ Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
+ llvm::Value *CondCopy = Bld.CreateAnd(
+ Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
+
+ llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
+ Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
+
+ CGF.EmitBlock(CpyThenBB);
+ emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
+ RemoteReduceList, LocalReduceList);
+ Bld.CreateBr(CpyMergeBB);
+
+ CGF.EmitBlock(CpyElseBB);
+ Bld.CreateBr(CpyMergeBB);
+
+ CGF.EmitBlock(CpyMergeBB);
+
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// This function emits a helper that copies all the reduction variables from
+/// the team into the provided global buffer for the reduction variables.
+///
+/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
+/// For all data entries D in reduce_data:
+/// Copy local D to buffer.D[Idx]
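+/// where the buffer is assumed to be laid out as one field per reduction
+/// variable, each field an array with one slot per buffer entry, so that
+/// buffer.D[Idx] selects the current slot (a sketch of the layout):
+///   struct RedRecord { D0Ty D0[Size]; /* ... */ DNTy DN[Size]; };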
+static llvm::Value *emitListToGlobalCopyFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ Address LocalReduceList(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
+ C.VoidPtrTy, Loc),
+ CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
+ CGF.getPointerAlign());
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (const Expr *Private : Privates) {
+ // Reduce element = LocalReduceList[i]
+ Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
+ llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
+ ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ // elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ Address ElemPtr =
+ Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
+ // Global = Buffer.VD[Idx];
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
+ switch (CGF.getEvaluationKind(Private->getType())) {
+ case TEK_Scalar: {
+ llvm::Value *V = CGF.EmitLoadOfScalar(
+ ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
+ LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
+ CGF.EmitStoreOfScalar(V, GlobLVal);
+ break;
+ }
+ case TEK_Complex: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
+ CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
+ CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
+ break;
+ }
+ case TEK_Aggregate:
+ CGF.EmitAggregateCopy(GlobLVal,
+ CGF.MakeAddrLValue(ElemPtr, Private->getType()),
+ Private->getType(), AggValueSlot::DoesNotOverlap);
+ break;
+ }
+ ++Idx;
+ }
+
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// This function emits a helper that reduces all the reduction variables from
+/// the team into the provided global buffer for the reduction variables.
+///
+/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
+/// void *GlobPtrs[];
+/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
+/// ...
+/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
+/// reduce_function(GlobPtrs, reduce_data);
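+///
+/// The global slot thus acts as the left-hand-side accumulator, i.e. the
+/// call amounts to global = global op local for each entry (assuming the
+/// usual reduce_function(lhs, rhs) semantics).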
+static llvm::Value *emitListToGlobalReduceFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap,
+ llvm::Function *ReduceFn) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ // Global = Buffer.VD[Idx];
+ const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
+ llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
+ CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ // Call reduce_function(GlobalReduceList, ReduceList)
+ llvm::Value *GlobalReduceList =
+ CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
+ AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// This function emits a helper that copies all the reduction variables from
+/// the provided global buffer back into the team's thread-local Reduce lists.
+///
+/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
+/// For all data entries D in reduce_data:
+/// Copy buffer.D[Idx] to local D;
+static llvm::Value *emitGlobalToListCopyFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ Address LocalReduceList(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
+ C.VoidPtrTy, Loc),
+ CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
+ CGF.getPointerAlign());
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (const Expr *Private : Privates) {
+ // Reduce element = LocalReduceList[i]
+ Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
+ llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
+ ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ // elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ Address ElemPtr =
+ Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
+ // Global = Buffer.VD[Idx];
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
+ switch (CGF.getEvaluationKind(Private->getType())) {
+ case TEK_Scalar: {
+ llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
+ CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
+ LValueBaseInfo(AlignmentSource::Type),
+ TBAAAccessInfo());
+ break;
+ }
+ case TEK_Complex: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
+ CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
+ /*isInit=*/false);
+ break;
+ }
+ case TEK_Aggregate:
+ CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
+ GlobLVal, Private->getType(),
+ AggValueSlot::DoesNotOverlap);
+ break;
+ }
+ ++Idx;
+ }
+
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// This function emits a helper that reduces the reduction data in the
+/// provided global buffer into the thread-local reduce list.
+///
+/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
+/// void *GlobPtrs[];
+/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
+/// ...
+/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
+/// reduce_function(reduce_data, GlobPtrs);
+static llvm::Value *emitGlobalToListReduceFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap,
+ llvm::Function *ReduceFn) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ // Global = Buffer.VD[Idx];
+ const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
+ llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
+ CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ // Call reduce_function(ReduceList, GlobalReduceList)
+ llvm::Value *GlobalReduceList =
+ CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
+ AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
+ CGF.FinishFunction();
+ return Fn;
+}
+
+///
+/// Design of OpenMP reductions on the GPU
+///
+/// Consider a typical OpenMP program with one or more reduction
+/// clauses:
+///
+/// float foo;
+/// double bar;
+/// #pragma omp target teams distribute parallel for \
+/// reduction(+:foo) reduction(*:bar)
+/// for (int i = 0; i < N; i++) {
+/// foo += A[i]; bar *= B[i];
+/// }
+///
+/// where 'foo' and 'bar' are reduced across all OpenMP threads in
+/// all teams. In our OpenMP implementation on the NVPTX device an
+/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
+/// within a team are mapped to CUDA threads within a threadblock.
+/// Our goal is to efficiently aggregate values across all OpenMP
+/// threads such that:
+///
+/// - the compiler and runtime are logically concise, and
+/// - the reduction is performed efficiently in a hierarchical
+/// manner as follows: within OpenMP threads in the same warp,
+/// across warps in a threadblock, and finally across teams on
+/// the NVPTX device.
+///
+/// Introduction to Decoupling
+///
+/// We would like to decouple the compiler and the runtime so that the
+/// latter is ignorant of the reduction variables (number, data types)
+/// and the reduction operators. This allows a simpler interface
+/// and implementation while still attaining good performance.
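+///
+/// In runtime terms this decoupling surfaces as function-pointer
+/// parameters; conceptually (signatures simplified for illustration):
+///
+///   typedef void (*kmp_ShuffleReductFctPtr)(void *reduce_data,
+///                                           int16_t lane_id,
+///                                           int16_t lane_offset,
+///                                           int16_t alg_version);
+///   typedef void (*kmp_InterWarpCopyFctPtr)(void *reduce_data,
+///                                           int32_t num_warps);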
+///
+/// Pseudocode for the aforementioned OpenMP program generated by the
+/// compiler is as follows:
+///
+/// 1. Create private copies of reduction variables on each OpenMP
+/// thread: 'foo_private', 'bar_private'
+/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
+/// to it and writes the result in 'foo_private' and 'bar_private'
+/// respectively.
+/// 3. Call the OpenMP runtime on the GPU to reduce within a team
+/// and store the result on the team master:
+///
+/// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
+/// reduceData, shuffleReduceFn, interWarpCpyFn)
+///
+/// where:
+/// struct ReduceData {
+///       float *foo;
+/// double *bar;
+/// } reduceData
+/// reduceData.foo = &foo_private
+/// reduceData.bar = &bar_private
+///
+/// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
+/// auxiliary functions generated by the compiler that operate on
+/// variables of type 'ReduceData'. They help the runtime perform the
+/// algorithmic steps in a data-agnostic manner (a sketch of a possible
+/// 'shuffleReduceFn' follows this list).
+///
+/// 'shuffleReduceFn' is a pointer to a function that reduces data
+/// of type 'ReduceData' across two OpenMP threads (lanes) in the
+/// same warp. It takes the following arguments as input:
+///
+/// a. variable of type 'ReduceData' on the calling lane,
+/// b. its lane_id,
+/// c. an offset relative to the current lane_id to generate a
+/// remote_lane_id. The remote lane contains the second
+/// variable of type 'ReduceData' that is to be reduced.
+/// d. an algorithm version parameter determining which reduction
+/// algorithm to use.
+///
+/// 'shuffleReduceFn' retrieves data from the remote lane using
+/// efficient GPU shuffle intrinsics and reduces, using the
+/// algorithm specified by the 4th parameter, the two operands
+/// element-wise. The result is written to the first operand.
+///
+/// Different reduction algorithms are implemented in different
+/// runtime functions, all calling 'shuffleReduceFn' to perform
+/// the essential reduction step. Therefore, based on the 4th
+/// parameter, this function behaves slightly differently to
+/// cooperate with the runtime to ensure correctness under
+/// different circumstances.
+///
+/// 'InterWarpCpyFn' is a pointer to a function that transfers
+/// reduced variables across warps. It tunnels, through CUDA
+/// shared memory, the thread-private data of type 'ReduceData'
+/// from lane 0 of each warp to a lane in the first warp.
+/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
+/// The last team writes the global reduced value to memory.
+///
+/// ret = __kmpc_nvptx_teams_reduce_nowait(...,
+/// reduceData, shuffleReduceFn, interWarpCpyFn,
+/// scratchpadCopyFn, loadAndReduceFn)
+///
+/// 'scratchpadCopyFn' is a helper that stores reduced
+/// data from the team master to a scratchpad array in
+/// global memory.
+///
+/// 'loadAndReduceFn' is a helper that loads data from
+/// the scratchpad array and reduces it with the input
+/// operand.
+///
+/// These compiler generated functions hide address
+/// calculation and alignment information from the runtime.
+/// 5. if ret == 1:
+/// The team master of the last team stores the reduced
+/// result to the globals in memory.
+/// foo += reduceData.foo; bar *= reduceData.bar
+///
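+/// As an illustration only (a hand-written sketch, not the code the
+/// compiler actually emits), a 'shuffleReduceFn' for the 'foo'/'bar'
+/// program above could behave as follows in the full-warp case:
+///
+///   void shuffleReduceFn(ReduceData *data, int16_t lane_id,
+///                        int16_t offset, int16_t alg_version) {
+///     float remote_foo = shuffle_down(*data->foo, offset, WARPSIZE);
+///     double remote_bar = shuffle_down(*data->bar, offset, WARPSIZE);
+///     *data->foo += remote_foo;    // REDUCE_OP is '+' for foo
+///     *data->bar *= remote_bar;    // REDUCE_OP is '*' for bar
+///   }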
+///
+/// Warp Reduction Algorithms
+///
+/// On the warp level, we have three algorithms implemented in the
+/// OpenMP runtime depending on the number of active lanes:
+///
+/// Full Warp Reduction
+///
+/// The reduce algorithm within a warp where all lanes are active
+/// is implemented in the runtime as follows:
+///
+/// full_warp_reduce(void *reduce_data,
+/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
+/// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
+/// ShuffleReduceFn(reduce_data, 0, offset, 0);
+/// }
+///
+/// The algorithm completes in log2(WARPSIZE) steps, e.g. five steps
+/// (offsets 16, 8, 4, 2, 1) for WARPSIZE = 32.
+///
+/// 'ShuffleReduceFn' is called here with lane_id set to 0 because the
+/// lane_id is not used by the full-warp algorithm; we therefore save
+/// instructions by not retrieving it from the corresponding special
+/// registers. The 4th parameter, which represents the version of the
+/// algorithm being used, is set to 0 to signify full warp reduction.
+///
+/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
+///
+/// #reduce_elem refers to an element in the local lane's data structure
+/// #remote_elem is retrieved from a remote lane
+/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
+/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
+///
+/// Contiguous Partial Warp Reduction
+///
+/// This reduce algorithm is used within a warp where only the first
+/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
+/// number of OpenMP threads in a parallel region is not a multiple of
+/// WARPSIZE. The algorithm is implemented in the runtime as follows:
+///
+/// void
+/// contiguous_partial_reduce(void *reduce_data,
+/// kmp_ShuffleReductFctPtr ShuffleReduceFn,
+/// int size, int lane_id) {
+/// int curr_size;
+/// int offset;
+/// curr_size = size;
+///   offset = curr_size/2;
+///   while (offset > 0) {
+/// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
+/// curr_size = (curr_size+1)/2;
+/// offset = curr_size/2;
+/// }
+/// }
+///
+/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
+///
+/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
+/// if (lane_id < offset)
+/// reduce_elem = reduce_elem REDUCE_OP remote_elem
+/// else
+/// reduce_elem = remote_elem
+///
+/// This algorithm assumes that the data to be reduced are located in a
+/// contiguous subset of lanes starting from the first. When there is
+/// an odd number of active lanes, the data in the last lane is not
+/// aggregated with any other lane's data but is instead copied over.
+/// For example, with n = 5 active lanes the offsets are 2, 1, 1 and
+/// the reduction completes in three steps.
+///
+/// Dispersed Partial Warp Reduction
+///
+/// This algorithm is used within a warp when any discontiguous subset of
+/// lanes are active. It is used to implement the reduction operation
+/// across lanes in an OpenMP simd region or in a nested parallel region.
+///
+/// void
+/// dispersed_partial_reduce(void *reduce_data,
+/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
+/// int size, remote_id;
+/// int logical_lane_id = number_of_active_lanes_before_me() * 2;
+/// do {
+/// remote_id = next_active_lane_id_right_after_me();
+///       # the above function returns 0 if no active lane
+/// # is present right after the current lane.
+/// size = number_of_active_lanes_in_this_warp();
+/// logical_lane_id /= 2;
+/// ShuffleReduceFn(reduce_data, logical_lane_id,
+/// remote_id-1-threadIdx.x, 2);
+/// } while (logical_lane_id % 2 == 0 && size > 1);
+/// }
+///
+/// There is no assumption made about the initial state of the reduction.
+/// Any number of lanes (>=1) could be active at any position. The reduction
+/// result is returned in the first active lane.
+///
+/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
+///
+/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
+/// if (lane_id % 2 == 0 && offset > 0)
+/// reduce_elem = reduce_elem REDUCE_OP remote_elem
+/// else
+/// reduce_elem = remote_elem
+///
+///
+/// Intra-Team Reduction
+///
+/// This function, as implemented in the runtime call
+/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
+/// threads in a team. It first reduces within a warp using the
+/// aforementioned algorithms. We then proceed to gather all such
+/// reduced values at the first warp.
+///
+/// The runtime makes use of the function 'InterWarpCpyFn', which copies
+/// data from each of the "warp masters" (zeroth lane of each warp, where
+/// warp-reduced data is held) to the zeroth warp. This step reduces (in
+/// a mathematical sense) the problem of reduction across warp masters in
+/// a block to the problem of warp reduction.
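+///
+/// A rough sketch of that copy (names are hypothetical; the emitted
+/// helper moves data in 32-bit chunks and synchronizes between phases):
+///
+///   __shared__ int32_t transfer_medium[WARPSIZE];
+///   for each 32-bit chunk C of reduce_data:
+///     if (lane_id == 0)            # warp master publishes its chunk
+///       transfer_medium[warp_id] = C;
+///     __syncthreads();
+///     if (warp_id == 0 && lane_id < num_active_warps)
+///       C = transfer_medium[lane_id];  # gathered into the first warp
+///     __syncthreads();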
+///
+///
+/// Inter-Team Reduction
+///
+/// Once a team has reduced its data to a single value, it is stored in
+/// a global scratchpad array. Since each team has a distinct slot, this
+/// can be done without locking.
+///
+/// The last team to write to the scratchpad array proceeds to reduce the
+/// scratchpad array. One or more workers in the last team use the helper
+/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
+/// the k'th worker reduces every k'th element.
+///
+/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
+/// reduce across workers and compute a globally reduced value.
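+///
+/// Note that in the '_v2' entry point used by the code below, the
+/// scratchpad pair 'scratchpadCopyFn'/'loadAndReduceFn' is generalized
+/// into the four list_to_global/global_to_list copy and reduce helpers
+/// emitted above, which move data between the reduce list and a
+/// fixed-size global team-reduction buffer.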
+///
+void CGOpenMPRuntimeGPU::emitReduction(
+ CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
+ if (!CGF.HaveInsertPoint())
+ return;
+
+ bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
+#ifndef NDEBUG
+ bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
+#endif
+
+ if (Options.SimpleReduction) {
+ assert(!TeamsReduction && !ParallelReduction &&
+ "Invalid reduction selection in emitReduction.");
+ CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
+ ReductionOps, Options);
+ return;
+ }
+
+ assert((TeamsReduction || ParallelReduction) &&
+ "Invalid reduction selection in emitReduction.");
+
+ // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
+ // RedList, shuffle_reduce_func, interwarp_copy_func);
+ // or
+ // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadId = getThreadID(CGF, Loc);
+
+ llvm::Value *Res;
+ ASTContext &C = CGM.getContext();
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ auto Size = RHSExprs.size();
+ for (const Expr *E : Privates) {
+ if (E->getType()->isVariablyModifiedType())
+ // Reserve place for array size.
+ ++Size;
+ }
+ llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
+ QualType ReductionArrayTy =
+ C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
+ Elem);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ReductionList.getPointer(), CGF.VoidPtrTy);
+ llvm::Function *ReductionFn = emitReductionFunction(
+ Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
+ LHSExprs, RHSExprs, ReductionOps);
+ llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
+ llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
+ CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
+ llvm::Value *InterWarpCopyFn =
+ emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
+
+ if (ParallelReduction) {
+ llvm::Value *Args[] = {RTLoc,
+ ThreadId,
+ CGF.Builder.getInt32(RHSExprs.size()),
+ ReductionArrayTySize,
+ RL,
+ ShuffleAndReduceFn,
+ InterWarpCopyFn};
+
+ Res = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
+ Args);
+ } else {
+ assert(TeamsReduction && "expected teams reduction.");
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
+ llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
+ int Cnt = 0;
+ for (const Expr *DRE : Privates) {
+ PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
+ ++Cnt;
+ }
+ const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
+ CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
+ C.getLangOpts().OpenMPCUDAReductionBufNum);
+ TeamsReductions.push_back(TeamReductionRec);
+ if (!KernelTeamsReductionPtr) {
+ KernelTeamsReductionPtr = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_teams_reductions_buffer_$_$ptr");
+ }
+ llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
+ Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
+ /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
+ llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
+ llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
+ ReductionFn);
+ llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
+ llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
+ ReductionFn);
+
+ llvm::Value *Args[] = {
+ RTLoc,
+ ThreadId,
+ GlobalBufferPtr,
+ CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
+ RL,
+ ShuffleAndReduceFn,
+ InterWarpCopyFn,
+ GlobalToBufferCpyFn,
+ GlobalToBufferRedFn,
+ BufferToGlobalCpyFn,
+ BufferToGlobalRedFn};
+
+ Res = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
+ Args);
+ }
+
+ // 5. Build if (res == 1)
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
+ llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
+ Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
+ CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
+
+ // 6. Build then branch: where we have reduced values in the master
+ // thread in each team.
+ // __kmpc_end_reduce{_nowait}(<gtid>);
+ // break;
+ CGF.EmitBlock(ThenBB);
+
+ // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
+ auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
+ this](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto IPriv = Privates.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
+ for (const Expr *E : ReductionOps) {
+ emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
+ cast<DeclRefExpr>(*IRHS));
+ ++IPriv;
+ ++ILHS;
+ ++IRHS;
+ }
+ };
+ llvm::Value *EndArgs[] = {ThreadId};
+ RegionCodeGenTy RCG(CodeGen);
+ NVPTXActionTy Action(
+ nullptr, llvm::None,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
+ EndArgs);
+ RCG.setAction(Action);
+ RCG(CGF);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+const VarDecl *
+CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
+ const VarDecl *NativeParam) const {
+ if (!NativeParam->getType()->isReferenceType())
+ return NativeParam;
+ QualType ArgType = NativeParam->getType();
+ QualifierCollector QC;
+ const Type *NonQualTy = QC.strip(ArgType);
+ QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
+ if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
+ if (Attr->getCaptureKind() == OMPC_map) {
+ PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
+ LangAS::opencl_global);
+ } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
+ PointeeTy.isConstant(CGM.getContext())) {
+ PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
+ LangAS::opencl_generic);
+ }
+ }
+ ArgType = CGM.getContext().getPointerType(PointeeTy);
+ QC.addRestrict();
+ enum { NVPTX_local_addr = 5 };
+ QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
+ ArgType = QC.apply(CGM.getContext(), ArgType);
+ if (isa<ImplicitParamDecl>(NativeParam))
+ return ImplicitParamDecl::Create(
+ CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
+ NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
+ return ParmVarDecl::Create(
+ CGM.getContext(),
+ const_cast<DeclContext *>(NativeParam->getDeclContext()),
+ NativeParam->getBeginLoc(), NativeParam->getLocation(),
+ NativeParam->getIdentifier(), ArgType,
+ /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
+}
+
+Address
+CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
+ const VarDecl *NativeParam,
+ const VarDecl *TargetParam) const {
+ assert(NativeParam != TargetParam &&
+ NativeParam->getType()->isReferenceType() &&
+ "Native arg must not be the same as target arg.");
+ Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
+ QualType NativeParamType = NativeParam->getType();
+ QualifierCollector QC;
+ const Type *NonQualTy = QC.strip(NativeParamType);
+ QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
+ unsigned NativePointeeAddrSpace =
+ CGF.getContext().getTargetAddressSpace(NativePointeeTy);
+ QualType TargetTy = TargetParam->getType();
+ llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
+ LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
+ // First cast to generic.
+ TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
+ /*AddrSpace=*/0));
+ // Cast from generic to native address space.
+ TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
+ NativePointeeAddrSpace));
+ Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
+ CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
+ NativeParamType);
+ return NativeParamAddr;
+}
+
+void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
+ ArrayRef<llvm::Value *> Args) const {
+ SmallVector<llvm::Value *, 4> TargetArgs;
+ TargetArgs.reserve(Args.size());
+ auto *FnType = OutlinedFn.getFunctionType();
+ for (unsigned I = 0, E = Args.size(); I < E; ++I) {
+ if (FnType->isVarArg() && FnType->getNumParams() <= I) {
+ TargetArgs.append(std::next(Args.begin(), I), Args.end());
+ break;
+ }
+ llvm::Type *TargetType = FnType->getParamType(I);
+ llvm::Value *NativeArg = Args[I];
+ if (!TargetType->isPointerTy()) {
+ TargetArgs.emplace_back(NativeArg);
+ continue;
+ }
+ llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ NativeArg,
+ NativeArg->getType()->getPointerElementType()->getPointerTo());
+ TargetArgs.emplace_back(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
+ }
+ CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
+}
+
+/// Emit a function that wraps the outlined parallel region
+/// and controls the arguments that are passed to this function.
+/// The wrapper ensures that the outlined function is called
+/// with the correct arguments when data is shared.
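+///
+/// A rough sketch of the emitted wrapper (names are illustrative; the
+/// exact emission follows in the body below):
+///
+///   void outlined_parallel_fn_wrapper(uint16_t parallel_level,
+///                                     uint32_t tid) {
+///     int32_t zero = 0;
+///     void **shared_args;
+///     __kmpc_get_shared_variables(&shared_args);
+///     // Loop bounds (if any) and captured variables are unpacked from
+///     // shared_args and forwarded to the outlined function.
+///     outlined_parallel_fn(&tid, &zero, shared_args[0], ...);
+///   }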
+llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
+ llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
+ ASTContext &Ctx = CGM.getContext();
+ const auto &CS = *D.getCapturedStmt(OMPD_parallel);
+
+ // Create a function that takes as argument the source thread.
+ FunctionArgList WrapperArgs;
+ QualType Int16QTy =
+ Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
+ QualType Int32QTy =
+ Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
+ ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
+ /*Id=*/nullptr, Int16QTy,
+ ImplicitParamDecl::Other);
+ ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
+ /*Id=*/nullptr, Int32QTy,
+ ImplicitParamDecl::Other);
+ WrapperArgs.emplace_back(&ParallelLevelArg);
+ WrapperArgs.emplace_back(&WrapperArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
+
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Fn->setDoesNotRecurse();
+
+ CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
+ CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
+ D.getBeginLoc(), D.getBeginLoc());
+
+ const auto *RD = CS.getCapturedRecordDecl();
+ auto CurField = RD->field_begin();
+
+ Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ // Get the array of arguments.
+ SmallVector<llvm::Value *, 8> Args;
+
+ Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
+ Args.emplace_back(ZeroAddr.getPointer());
+
+ CGBuilderTy &Bld = CGF.Builder;
+ auto CI = CS.capture_begin();
+
+ // Use global memory for data sharing.
+ // Handle passing of global args to workers.
+ Address GlobalArgs =
+ CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
+ llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
+ llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
+ DataSharingArgs);
+
+ // Retrieve the shared variables from the list of references returned
+ // by the runtime. Pass the variables to the outlined function.
+ Address SharedArgListAddress = Address::invalid();
+ if (CS.capture_size() > 0 ||
+ isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
+ SharedArgListAddress = CGF.EmitLoadOfPointer(
+ GlobalArgs, CGF.getContext()
+ .getPointerType(CGF.getContext().getPointerType(
+ CGF.getContext().VoidPtrTy))
+ .castAs<PointerType>());
+ }
+ unsigned Idx = 0;
+ if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
+ Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
+ Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Src, CGF.SizeTy->getPointerTo());
+ llvm::Value *LB = CGF.EmitLoadOfScalar(
+ TypedAddress,
+ /*Volatile=*/false,
+ CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
+ cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
+ Args.emplace_back(LB);
+ ++Idx;
+ Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
+ TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Src, CGF.SizeTy->getPointerTo());
+ llvm::Value *UB = CGF.EmitLoadOfScalar(
+ TypedAddress,
+ /*Volatile=*/false,
+ CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
+ cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
+ Args.emplace_back(UB);
+ ++Idx;
+ }
+ if (CS.capture_size() > 0) {
+ ASTContext &CGFContext = CGF.getContext();
+ for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
+ QualType ElemTy = CurField->getType();
+ Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
+ Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
+ llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
+ /*Volatile=*/false,
+ CGFContext.getPointerType(ElemTy),
+ CI->getLocation());
+ if (CI->capturesVariableByCopy() &&
+ !CI->getCapturedVar()->getType()->isAnyPointerType()) {
+ Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
+ CI->getLocation());
+ }
+ Args.emplace_back(Arg);
+ }
+ }
+
+ emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
+ CGF.FinishFunction();
+ return Fn;
+}
+
+void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
+ const Decl *D) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
+ return;
+
+ assert(D && "Expected function or captured|block decl.");
+ assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
+ "Function is registered already.");
+ assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
+ "Team is set but not processed.");
+ const Stmt *Body = nullptr;
+ bool NeedToDelayGlobalization = false;
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ Body = FD->getBody();
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
+ Body = BD->getBody();
+ } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
+ Body = CD->getBody();
+ NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
+ if (NeedToDelayGlobalization &&
+ getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
+ return;
+ }
+ if (!Body)
+ return;
+ CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
+ VarChecker.Visit(Body);
+ const RecordDecl *GlobalizedVarsRecord =
+ VarChecker.getGlobalizedRecord(IsInTTDRegion);
+ TeamAndReductions.first = nullptr;
+ TeamAndReductions.second.clear();
+ ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
+ VarChecker.getEscapedVariableLengthDecls();
+ if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
+ return;
+ auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
+ I->getSecond().MappedParams =
+ std::make_unique<CodeGenFunction::OMPMapVars>();
+ I->getSecond().GlobalRecord = GlobalizedVarsRecord;
+ I->getSecond().EscapedParameters.insert(
+ VarChecker.getEscapedParameters().begin(),
+ VarChecker.getEscapedParameters().end());
+ I->getSecond().EscapedVariableLengthDecls.append(
+ EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
+ DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
+ for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
+ assert(VD->isCanonicalDecl() && "Expected canonical declaration");
+ const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
+ Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
+ }
+ if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
+ CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
+ VarChecker.Visit(Body);
+ I->getSecond().SecondaryGlobalRecord =
+ VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
+ I->getSecond().SecondaryLocalVarData.emplace();
+ DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
+ for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
+ assert(VD->isCanonicalDecl() && "Expected canonical declaration");
+ const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
+ Data.insert(
+ std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
+ }
+ }
+ if (!NeedToDelayGlobalization) {
+ emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
+ struct GlobalizationScope final : EHScopeStack::Cleanup {
+ GlobalizationScope() = default;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
+ .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
+ }
+ };
+ CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
+ }
+}
+
+Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
+ const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
+ auto AS = LangAS::Default;
+ switch (A->getAllocatorType()) {
+ // Use the default allocator here as by default local vars are
+ // threadlocal.
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
+ case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
+ case OMPAllocateDeclAttr::OMPThreadMemAlloc:
+ case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
+ case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
+ // Follow the user decision - use default allocation.
+ return Address::invalid();
+ case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
+      // TODO: implement support for user-defined allocators.
+ return Address::invalid();
+ case OMPAllocateDeclAttr::OMPConstMemAlloc:
+ AS = LangAS::cuda_constant;
+ break;
+ case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
+ AS = LangAS::cuda_shared;
+ break;
+ case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
+ case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
+ break;
+ }
+ llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), VarTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
+ VD->getName(),
+ /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(AS));
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ GV->setAlignment(Align.getAsAlign());
+ return Address(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
+ VD->getType().getAddressSpace()))),
+ Align);
+ }
+
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
+ return Address::invalid();
+
+ VD = VD->getCanonicalDecl();
+ auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I == FunctionGlobalizedDecls.end())
+ return Address::invalid();
+ auto VDI = I->getSecond().LocalVarData.find(VD);
+ if (VDI != I->getSecond().LocalVarData.end())
+ return VDI->second.PrivateAddr;
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
+ E(VD->attr_end());
+ IT != E; ++IT) {
+ auto VDI = I->getSecond().LocalVarData.find(
+ cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
+ ->getCanonicalDecl());
+ if (VDI != I->getSecond().LocalVarData.end())
+ return VDI->second.PrivateAddr;
+ }
+ }
+
+ return Address::invalid();
+}
+
+void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
+ FunctionGlobalizedDecls.erase(CGF.CurFn);
+ CGOpenMPRuntime::functionFinished(CGF);
+}
+
+void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ OpenMPDistScheduleClauseKind &ScheduleKind,
+ llvm::Value *&Chunk) const {
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
+ ScheduleKind = OMPC_DIST_SCHEDULE_static;
+ Chunk = CGF.EmitScalarConversion(
+ RT.getGPUNumThreads(CGF),
+ CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
+ S.getIterationVariable()->getType(), S.getBeginLoc());
+ return;
+ }
+ CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
+ CGF, S, ScheduleKind, Chunk);
+}
+
+void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ OpenMPScheduleClauseKind &ScheduleKind,
+ const Expr *&ChunkExpr) const {
+ ScheduleKind = OMPC_SCHEDULE_static;
+ // Chunk size is 1 in this case.
+ llvm::APInt ChunkSize(32, 1);
+ ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
+ CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
+ SourceLocation());
+}
+
+void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
+ assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
+ " Expected target-based directive.");
+ const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
+ for (const CapturedStmt::Capture &C : CS->captures()) {
+ // Capture variables captured by reference in lambdas for target-based
+ // directives.
+ if (!C.capturesVariable())
+ continue;
+ const VarDecl *VD = C.getCapturedVar();
+ const auto *RD = VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl();
+ if (!RD || !RD->isLambda())
+ continue;
+ Address VDAddr = CGF.GetAddrOfLocalVar(VD);
+ LValue VDLVal;
+ if (VD->getType().getCanonicalType()->isReferenceType())
+ VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
+ else
+ VDLVal = CGF.MakeAddrLValue(
+ VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture = nullptr;
+ RD->getCaptureFields(Captures, ThisCapture);
+ if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
+ LValue ThisLVal =
+ CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
+ llvm::Value *CXXThis = CGF.LoadCXXThis();
+ CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
+ }
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() != LCK_ByRef)
+ continue;
+ const VarDecl *VD = LC.getCapturedVar();
+ if (!CS->capturesVariable(VD))
+ continue;
+ auto It = Captures.find(VD);
+ assert(It != Captures.end() && "Found lambda capture without field.");
+ LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
+ Address VDAddr = CGF.GetAddrOfLocalVar(VD);
+ if (VD->getType().getCanonicalType()->isReferenceType())
+ VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
+ VD->getType().getCanonicalType())
+ .getAddress(CGF);
+ CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
+ }
+ }
+}
+
+unsigned CGOpenMPRuntimeGPU::getDefaultFirstprivateAddressSpace() const {
+ return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
+}
+
+bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
+ LangAS &AS) {
+ if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
+ return false;
+ const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
+  switch (A->getAllocatorType()) {
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
+ case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
+ // Not supported, fallback to the default mem space.
+ case OMPAllocateDeclAttr::OMPThreadMemAlloc:
+ case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
+ case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
+ case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
+ case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
+ AS = LangAS::Default;
+ return true;
+ case OMPAllocateDeclAttr::OMPConstMemAlloc:
+ AS = LangAS::cuda_constant;
+ return true;
+ case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
+ AS = LangAS::cuda_shared;
+ return true;
+ case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
+ llvm_unreachable("Expected predefined allocator for the variables with the "
+ "static storage.");
+ }
+ return false;
+}
+
+// Get current CudaArch and ignore any unknown values.
+static CudaArch getCudaArch(CodeGenModule &CGM) {
+ if (!CGM.getTarget().hasFeature("ptx"))
+ return CudaArch::UNKNOWN;
+ for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
+ if (Feature.getValue()) {
+ CudaArch Arch = StringToCudaArch(Feature.getKey());
+ if (Arch != CudaArch::UNKNOWN)
+ return Arch;
+ }
+ }
+ return CudaArch::UNKNOWN;
+}
+
+/// Check to see if the target architecture supports unified addressing,
+/// which is required by the OpenMP requires clause "unified_shared_memory".
+void CGOpenMPRuntimeGPU::processRequiresDirective(
+ const OMPRequiresDecl *D) {
+ for (const OMPClause *Clause : D->clauselists()) {
+ if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
+ CudaArch Arch = getCudaArch(CGM);
+ switch (Arch) {
+ case CudaArch::SM_20:
+ case CudaArch::SM_21:
+ case CudaArch::SM_30:
+ case CudaArch::SM_32:
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ case CudaArch::SM_50:
+ case CudaArch::SM_52:
+ case CudaArch::SM_53:
+ case CudaArch::SM_60:
+ case CudaArch::SM_61:
+ case CudaArch::SM_62: {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Out << "Target architecture " << CudaArchToString(Arch)
+ << " does not support unified addressing";
+ CGM.Error(Clause->getBeginLoc(), Out.str());
+ return;
+ }
+ case CudaArch::SM_70:
+ case CudaArch::SM_72:
+ case CudaArch::SM_75:
+ case CudaArch::SM_80:
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX602:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX705:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX805:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX908:
+ case CudaArch::GFX909:
+ case CudaArch::GFX90c:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
+ case CudaArch::GFX1031:
+ case CudaArch::GFX1032:
+ case CudaArch::GFX1033:
+ case CudaArch::UNUSED:
+ case CudaArch::UNKNOWN:
+ break;
+ case CudaArch::LAST:
+ llvm_unreachable("Unexpected Cuda arch.");
+ }
+ }
+ }
+ CGOpenMPRuntime::processRequiresDirective(D);
+}
+
+/// Get number of SMs and number of blocks per SM.
+static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
+ std::pair<unsigned, unsigned> Data;
+ if (CGM.getLangOpts().OpenMPCUDANumSMs)
+ Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
+ if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
+ Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
+ if (Data.first && Data.second)
+ return Data;
+ switch (getCudaArch(CGM)) {
+ case CudaArch::SM_20:
+ case CudaArch::SM_21:
+ case CudaArch::SM_30:
+ case CudaArch::SM_32:
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ case CudaArch::SM_50:
+ case CudaArch::SM_52:
+ case CudaArch::SM_53:
+ return {16, 16};
+ case CudaArch::SM_60:
+ case CudaArch::SM_61:
+ case CudaArch::SM_62:
+ return {56, 32};
+ case CudaArch::SM_70:
+ case CudaArch::SM_72:
+ case CudaArch::SM_75:
+ case CudaArch::SM_80:
+ return {84, 32};
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX602:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX705:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX805:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX908:
+ case CudaArch::GFX909:
+ case CudaArch::GFX90c:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
+ case CudaArch::GFX1031:
+ case CudaArch::GFX1032:
+ case CudaArch::GFX1033:
+ case CudaArch::UNUSED:
+ case CudaArch::UNKNOWN:
+ break;
+ case CudaArch::LAST:
+ llvm_unreachable("Unexpected Cuda arch.");
+ }
+ llvm_unreachable("Unexpected NVPTX target without ptx feature.");
+}
+
+void CGOpenMPRuntimeGPU::clear() {
+ if (!GlobalizedRecords.empty() &&
+ !CGM.getLangOpts().OpenMPCUDATargetParallel) {
+ ASTContext &C = CGM.getContext();
+ llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
+ llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
+ RecordDecl *StaticRD = C.buildImplicitRecord(
+ "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
+ StaticRD->startDefinition();
+ RecordDecl *SharedStaticRD = C.buildImplicitRecord(
+ "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
+ SharedStaticRD->startDefinition();
+ for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
+ if (Records.Records.empty())
+ continue;
+ unsigned Size = 0;
+ unsigned RecAlignment = 0;
+ for (const RecordDecl *RD : Records.Records) {
+ QualType RDTy = C.getRecordType(RD);
+ unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
+ RecAlignment = std::max(RecAlignment, Alignment);
+ unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
+ Size =
+ llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
+ }
+ Size = llvm::alignTo(Size, RecAlignment);
+ llvm::APInt ArySize(/*numBits=*/64, Size);
+ QualType SubTy = C.getConstantArrayType(
+ C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ const bool UseSharedMemory = Size <= SharedMemorySize;
+ auto *Field =
+ FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
+ SourceLocation(), SourceLocation(), nullptr, SubTy,
+ C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ if (UseSharedMemory) {
+ SharedStaticRD->addDecl(Field);
+ SharedRecs.push_back(&Records);
+ } else {
+ StaticRD->addDecl(Field);
+ GlobalRecs.push_back(&Records);
+ }
+ Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
+ Records.UseSharedMemory->setInitializer(
+ llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
+ }
+ // Allocate SharedMemorySize buffer for the shared memory.
+    // FIXME: nvlink does not handle weak linkage correctly (objects with
+    // different sizes are reported as erroneous).
+    // Restore this code as soon as nvlink is fixed.
+ if (!SharedStaticRD->field_empty()) {
+ llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
+ QualType SubTy = C.getConstantArrayType(
+ C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ auto *Field = FieldDecl::Create(
+ C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
+ C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ SharedStaticRD->addDecl(Field);
+ }
+ SharedStaticRD->completeDefinition();
+ if (!SharedStaticRD->field_empty()) {
+ QualType StaticTy = C.getRecordType(SharedStaticRD);
+ llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMStaticTy,
+ /*isConstant=*/false, llvm::GlobalValue::WeakAnyLinkage,
+ llvm::UndefValue::get(LLVMStaticTy),
+ "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ C.getTargetAddressSpace(LangAS::cuda_shared));
+ auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GV, CGM.VoidPtrTy);
+ for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
+ Rec->Buffer->replaceAllUsesWith(Replacement);
+ Rec->Buffer->eraseFromParent();
+ }
+ }
+ StaticRD->completeDefinition();
+ if (!StaticRD->field_empty()) {
+ QualType StaticTy = C.getRecordType(StaticRD);
+ std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
+ llvm::APInt Size1(32, SMsBlockPerSM.second);
+ QualType Arr1Ty =
+ C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ llvm::APInt Size2(32, SMsBlockPerSM.first);
+ QualType Arr2Ty =
+ C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
+      // FIXME: nvlink does not handle weak linkage correctly (objects with
+      // different sizes are reported as erroneous).
+ // Restore CommonLinkage as soon as nvlink is fixed.
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMArr2Ty,
+ /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(LLVMArr2Ty),
+ "_openmp_static_glob_rd_$_");
+ auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GV, CGM.VoidPtrTy);
+ for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
+ Rec->Buffer->replaceAllUsesWith(Replacement);
+ Rec->Buffer->eraseFromParent();
+ }
+ }
+ }
+ if (!TeamsReductions.empty()) {
+ ASTContext &C = CGM.getContext();
+ RecordDecl *StaticRD = C.buildImplicitRecord(
+ "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
+ StaticRD->startDefinition();
+ for (const RecordDecl *TeamReductionRec : TeamsReductions) {
+ QualType RecTy = C.getRecordType(TeamReductionRec);
+ auto *Field = FieldDecl::Create(
+ C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
+ C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ StaticRD->addDecl(Field);
+ }
+ StaticRD->completeDefinition();
+ QualType StaticTy = C.getRecordType(StaticRD);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+    // FIXME: nvlink does not handle weak linkage correctly (objects with
+    // different sizes are reported as erroneous).
+ // Restore CommonLinkage as soon as nvlink is fixed.
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMReductionsBufferTy,
+ /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(LLVMReductionsBufferTy),
+ "_openmp_teams_reductions_buffer_$_");
+ KernelTeamsReductionPtr->setInitializer(
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
+ CGM.VoidPtrTy));
+ }
+ CGOpenMPRuntime::clear();
+}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
new file mode 100644
index 000000000000..7267511ca672
--- /dev/null
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -0,0 +1,504 @@
+//===------ CGOpenMPRuntimeGPU.h - Interface to OpenMP GPU Runtimes ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a generalized class for OpenMP runtime code generation
+// specialized for the GPU targets NVPTX and AMDGCN.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEGPU_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEGPU_H
+
+#include "CGOpenMPRuntime.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "llvm/Frontend/OpenMP/OMPGridValues.h"
+
+namespace clang {
+namespace CodeGen {
+
+class CGOpenMPRuntimeGPU : public CGOpenMPRuntime {
+public:
+ /// Defines the execution mode.
+ enum ExecutionMode {
+ /// SPMD execution mode (all threads are worker threads).
+ EM_SPMD,
+ /// Non-SPMD execution mode (1 master thread, others are workers).
+ EM_NonSPMD,
+ /// Unknown execution mode (orphaned directive).
+ EM_Unknown,
+ };
+private:
+ /// Parallel outlined function work for workers to execute.
+ llvm::SmallVector<llvm::Function *, 16> Work;
+
+ struct EntryFunctionState {
+ llvm::BasicBlock *ExitBB = nullptr;
+ };
+
+ class WorkerFunctionState {
+ public:
+ llvm::Function *WorkerFn;
+ const CGFunctionInfo &CGFI;
+ SourceLocation Loc;
+
+ WorkerFunctionState(CodeGenModule &CGM, SourceLocation Loc);
+
+ private:
+ void createWorkerFunction(CodeGenModule &CGM);
+ };
+
+ ExecutionMode getExecutionMode() const;
+
+ bool requiresFullRuntime() const { return RequiresFullRuntime; }
+
+ /// Get barrier to synchronize all threads in a block.
+ void syncCTAThreads(CodeGenFunction &CGF);
+
+ /// Emit the worker function for the current target region.
+ void emitWorkerFunction(WorkerFunctionState &WST);
+
+ /// Helper for worker function. Emit body of worker loop.
+ void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST);
+
+ /// Helper for non-SPMD target entry function. Guide the master and
+ /// worker threads to their respective locations.
+ void emitNonSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
+ WorkerFunctionState &WST);
+
+ /// Signal termination of OMP execution for non-SPMD target entry
+ /// function.
+ void emitNonSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
+
+ /// Helper for generic variables globalization prolog.
+ void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc,
+ bool WithSPMDCheck = false);
+
+ /// Helper for generic variables globalization epilog.
+ void emitGenericVarsEpilog(CodeGenFunction &CGF, bool WithSPMDCheck = false);
+
+ /// Helper for SPMD mode target directive's entry function.
+ void emitSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
+ const OMPExecutableDirective &D);
+
+ /// Signal termination of SPMD mode execution.
+ void emitSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
+
+ //
+ // Base class overrides.
+ //
+
+ /// Creates offloading entry for the provided entry ID \a ID,
+ /// address \a Addr, size \a Size, and flags \a Flags.
+ void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
+ uint64_t Size, int32_t Flags,
+ llvm::GlobalValue::LinkageTypes Linkage) override;
+
+ /// Emit outlined function specialized for the Fork-Join
+ /// programming model for applicable target directives on the NVPTX device.
+ /// \param D Directive to emit.
+ /// \param ParentName Name of the function that encloses the target region.
+ /// \param OutlinedFn Outlined function value to be defined by this call.
+ /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+ /// \param IsOffloadEntry True if the outlined function is an offload entry.
+ /// An outlined function may not be an entry if, e.g. the if clause always
+ /// evaluates to false.
+ void emitNonSPMDKernel(const OMPExecutableDirective &D, StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
+ const RegionCodeGenTy &CodeGen);
+
+ /// Emit outlined function specialized for the Single Program
+ /// Multiple Data programming model for applicable target directives on the
+ /// NVPTX device.
+ /// \param D Directive to emit.
+ /// \param ParentName Name of the function that encloses the target region.
+ /// \param OutlinedFn Outlined function value to be defined by this call.
+ /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+ /// \param IsOffloadEntry True if the outlined function is an offload entry.
+ /// \param CodeGen Object containing the target statements.
+ /// An outlined function may not be an entry if, e.g. the if clause always
+ /// evaluates to false.
+ void emitSPMDKernel(const OMPExecutableDirective &D, StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
+ const RegionCodeGenTy &CodeGen);
+
+ /// Emit outlined function for 'target' directive on the NVPTX
+ /// device.
+ /// \param D Directive to emit.
+ /// \param ParentName Name of the function that encloses the target region.
+ /// \param OutlinedFn Outlined function value to be defined by this call.
+ /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+ /// \param IsOffloadEntry True if the outlined function is an offload entry.
+ /// An outlined function may not be an entry if, e.g. the if clause always
+ /// evaluates to false.
+ void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
+ StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry,
+ const RegionCodeGenTy &CodeGen) override;
+
+  /// Emits code for a parallel or serial call of the \a OutlinedFn with
+  /// variables captured in a record whose address is stored in \a
+  /// CapturedStruct.
+ /// This call is for the Non-SPMD Execution Mode.
+ /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
+ /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
+ /// \param CapturedVars A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
+ void emitNonSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond);
+
+  /// Emits code for a parallel or serial call of the \a OutlinedFn with
+  /// variables captured in a record whose address is stored in \a
+  /// CapturedStruct.
+ /// This call is for a parallel directive within an SPMD target directive.
+ /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
+ /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
+ /// \param CapturedVars A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
+ ///
+ void emitSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond);
+
+protected:
+  /// Get the function name of an outlined region.
+  /// The name can be customized depending on the target.
+ StringRef getOutlinedHelperName() const override {
+ return "__omp_outlined__";
+ }
+
+ /// Check if the default location must be constant.
+ /// Constant for NVPTX for better optimization.
+ bool isDefaultLocationConstant() const override { return true; }
+
+ /// Returns additional flags that can be stored in reserved_2 field of the
+ /// default location.
+ /// For NVPTX target contains data about SPMD/Non-SPMD execution mode +
+ /// Full/Lightweight runtime mode. Used for better optimization.
+ unsigned getDefaultLocationReserved2Flags() const override;
+
+public:
+ explicit CGOpenMPRuntimeGPU(CodeGenModule &CGM);
+ void clear() override;
+
+  /// Declare generalized virtual functions that need to be defined by all
+  /// specializations of the OpenMP GPU runtime targets, such as AMDGCN and
+  /// NVPTX.
+
+ /// Get the GPU warp size.
+ virtual llvm::Value *getGPUWarpSize(CodeGenFunction &CGF) = 0;
+
+ /// Get the id of the current thread on the GPU.
+ virtual llvm::Value *getGPUThreadID(CodeGenFunction &CGF) = 0;
+
+ /// Get the maximum number of threads in a block of the GPU.
+ virtual llvm::Value *getGPUNumThreads(CodeGenFunction &CGF) = 0;
+
+ /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
+ /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
+ virtual void emitProcBindClause(CodeGenFunction &CGF,
+ llvm::omp::ProcBindKind ProcBind,
+ SourceLocation Loc) override;
+
+ /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
+ /// clause.
+  /// \param NumThreads An integer expression for the number of threads.
+ virtual void emitNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc) override;
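+  // For illustration: this handles source such as
+  //
+  //   #pragma omp parallel num_threads(64)
+  //   { /* ... */ }
+  //
+  // where the expression '64' is evaluated and passed as \p NumThreads.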
+
+  /// This function ought to emit, in the general case, a call to the OpenMP
+  /// runtime function kmpc_push_num_teams. On the NVPTX backend it is not
+  /// needed, as these numbers are obtained through the PTX grid and block
+  /// configuration.
+  /// \param NumTeams An integer expression for the number of teams.
+  /// \param ThreadLimit An integer expression for the thread limit.
+ void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
+ const Expr *ThreadLimit, SourceLocation Loc) override;
+
+  /// Emits an inlined function for the specified OpenMP parallel
+  /// directive \a D. This outlined function has type void(*)(kmp_int32
+  /// *ThreadID, kmp_int32 BoundID, struct context_vars*).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+  /// \param InnermostKind Kind of the innermost directive (for simple
+  /// directives it is the directive itself; for combined directives, their
+  /// innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ llvm::Function *
+ emitParallelOutlinedFunction(const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
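+  // A sketch of the outlined function signature described above, written as a
+  // plain C prototype (the struct name is illustrative):
+  //
+  //   struct context_vars;
+  //   void __omp_outlined__(kmp_int32 *ThreadID, kmp_int32 BoundID,
+  //                         struct context_vars *Ctx);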
+
+  /// Emits an inlined function for the specified OpenMP teams
+  /// directive \a D. This outlined function has type void(*)(kmp_int32
+  /// *ThreadID, kmp_int32 BoundID, struct context_vars*).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+  /// \param InnermostKind Kind of the innermost directive (for simple
+  /// directives it is the directive itself; for combined directives, their
+  /// innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ llvm::Function *
+ emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
+
+  /// Emits code for a teams call of the \a OutlinedFn with variables
+  /// captured in a record whose address is stored in \a CapturedStruct.
+ /// \param OutlinedFn Outlined function to be run by team masters. Type of
+ /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
+ /// \param CapturedVars A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ ///
+ void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ SourceLocation Loc, llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars) override;
+
+  /// Emits code for a parallel or serial call of the \a OutlinedFn with
+  /// variables captured in a record whose address is stored in \a
+  /// CapturedStruct.
+ /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
+ /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
+ /// \param CapturedVars A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
+ void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond) override;
+
+ /// Emit an implicit/explicit barrier for OpenMP threads.
+ /// \param Kind Directive for which this implicit barrier call must be
+ /// generated. Must be OMPD_barrier for explicit barrier generation.
+  /// \param EmitChecks true if checks for cancellation barriers need to be
+  /// emitted.
+  /// \param ForceSimpleCall true if a simple barrier call must be emitted,
+  /// false if the runtime class decides which one to emit (simple or with
+  /// cancellation checks).
+ ///
+ void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool EmitChecks = true,
+ bool ForceSimpleCall = false) override;
+
+ /// Emits a critical region.
+ /// \param CriticalName Name of the critical region.
+ /// \param CriticalOpGen Generator for the statement associated with the given
+ /// critical region.
+ /// \param Hint Value of the 'hint' clause (optional).
+ void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen,
+ SourceLocation Loc,
+ const Expr *Hint = nullptr) override;
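+  // For illustration: this handles constructs such as
+  //
+  //   #pragma omp critical(name) hint(omp_sync_hint_contended)
+  //   { /* ... */ }
+  //
+  // where 'name' selects the critical region and the optional hint expression
+  // is passed as \p Hint.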
+
+  /// Emit code for the reduction clause.
+ ///
+ /// \param Privates List of private copies for original reduction arguments.
+ /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
+ /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
+ /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
+ /// or 'operator binop(LHS, RHS)'.
+ /// \param Options List of options for reduction codegen:
+ /// WithNowait true if parent directive has also nowait clause, false
+ /// otherwise.
+ /// SimpleReduction Emit reduction operation only. Used for omp simd
+ /// directive on the host.
+ /// ReductionKind The kind of reduction to perform.
+ virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps,
+ ReductionOptionsTy Options) override;
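+  // For illustration: this override handles GPU codegen for reductions such
+  // as
+  //
+  //   #pragma omp target teams distribute parallel for reduction(+ : sum)
+  //   for (int i = 0; i < N; ++i)
+  //     sum += a[i];
+  //
+  // using warp-level shuffles for intra-warp combining and the teams
+  // reduction buffer declared below for cross-team combining.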
+
+ /// Returns specified OpenMP runtime function for the current OpenMP
+ /// implementation. Specialized for the NVPTX device.
+ /// \param Function OpenMP runtime function.
+ /// \return Specified function.
+ llvm::FunctionCallee createNVPTXRuntimeFunction(unsigned Function);
+
+  /// Translates the native parameter of the outlined function if this is
+  /// required for the target.
+ /// \param FD Field decl from captured record for the parameter.
+ /// \param NativeParam Parameter itself.
+ const VarDecl *translateParameter(const FieldDecl *FD,
+ const VarDecl *NativeParam) const override;
+
+  /// Gets the address of the native argument based on the address of the
+  /// target-specific parameter.
+ /// \param NativeParam Parameter itself.
+ /// \param TargetParam Corresponding target-specific parameter.
+ Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
+ const VarDecl *TargetParam) const override;
+
+  /// Emits a call of the outlined function with the provided arguments,
+  /// translating these arguments to the correct target-specific arguments.
+ void emitOutlinedFunctionCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
+ ArrayRef<llvm::Value *> Args = llvm::None) const override;
+
+ /// Emits OpenMP-specific function prolog.
+ /// Required for device constructs.
+ void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) override;
+
+ /// Gets the OpenMP-specific address of the local variable.
+ Address getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD) override;
+
+  /// Target codegen is specialized based on two data-sharing modes: CUDA, in
+  /// which the local variables are actually thread-local in global memory, and
+  /// Generic, in which the local variables are placed in global memory if they
+  /// may escape their declaration context.
+ enum DataSharingMode {
+ /// CUDA data sharing mode.
+ CUDA,
+ /// Generic data-sharing mode.
+ Generic,
+ };
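+  // Illustrative note: CUDA mode is selected when the frontend is invoked
+  // with -fopenmp-cuda-mode (LangOpts.OpenMPCUDAMode); otherwise the Generic
+  // mode globalizes escaping locals through the data-sharing stack.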
+
+  /// Cleans up references to the objects in the finished function.
+ ///
+ void functionFinished(CodeGenFunction &CGF) override;
+
+ /// Choose a default value for the dist_schedule clause.
+ void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
+ const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
+ llvm::Value *&Chunk) const override;
+
+ /// Choose a default value for the schedule clause.
+ void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
+ const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
+ const Expr *&ChunkExpr) const override;
+
+ /// Adjust some parameters for the target-based directives, like addresses of
+ /// the variables captured by reference in lambdas.
+ void adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const override;
+
+  /// Performs a check on the requires decl to ensure that the target
+  /// architecture supports unified addressing.
+ void processRequiresDirective(const OMPRequiresDecl *D) override;
+
+  /// Returns the address space for constant firstprivates; the __constant__
+  /// address space by default.
+ unsigned getDefaultFirstprivateAddressSpace() const override;
+
+ /// Checks if the variable has associated OMPAllocateDeclAttr attribute with
+ /// the predefined allocator and translates it into the corresponding address
+ /// space.
+ bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS) override;
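+  // For illustration: a declaration such as
+  //
+  //   int x;
+  //   #pragma omp allocate(x) allocator(omp_pteam_mem_alloc)
+  //
+  // attaches an OMPAllocateDeclAttr whose predefined allocator this hook may
+  // translate into a GPU address space (e.g. shared memory for per-team
+  // allocations).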
+
+private:
+ /// Track the execution mode when codegening directives within a target
+ /// region. The appropriate mode (SPMD/NON-SPMD) is set on entry to the
+ /// target region and used by containing directives such as 'parallel'
+ /// to emit optimized code.
+ ExecutionMode CurrentExecutionMode = EM_Unknown;
+
+ /// Check if the full runtime is required (default - yes).
+ bool RequiresFullRuntime = true;
+
+  /// true if we're emitting the code for the target region and the next
+  /// parallel region is guaranteed to be at level 0.
+  bool IsInTargetMasterThreadRegion = false;
+  /// true if currently emitting code for a target/teams/distribute region,
+  /// false otherwise.
+  bool IsInTTDRegion = false;
+ /// true if we're definitely in the parallel region.
+ bool IsInParallelRegion = false;
+
+ /// Map between an outlined function and its wrapper.
+ llvm::DenseMap<llvm::Function *, llvm::Function *> WrapperFunctionsMap;
+
+  /// Emit a function that wraps the outlined parallel region and controls
+  /// the parameters that are passed to this function.
+ /// The wrapper ensures that the outlined function is called
+ /// with the correct arguments when data is shared.
+ llvm::Function *createParallelDataSharingWrapper(
+ llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D);
+
+  /// The data for a single globalized variable.
+ struct MappedVarData {
+ /// Corresponding field in the global record.
+ const FieldDecl *FD = nullptr;
+ /// Corresponding address.
+ Address PrivateAddr = Address::invalid();
+    /// true if only one element is required (for lastprivates in SPMD mode),
+    /// false if an array of warp-size elements must be created.
+ bool IsOnePerTeam = false;
+ MappedVarData() = delete;
+ MappedVarData(const FieldDecl *FD, bool IsOnePerTeam = false)
+ : FD(FD), IsOnePerTeam(IsOnePerTeam) {}
+ };
+ /// The map of local variables to their addresses in the global memory.
+ using DeclToAddrMapTy = llvm::MapVector<const Decl *, MappedVarData>;
+  /// Set of the parameters passed by value that escape the OpenMP context.
+ using EscapedParamsTy = llvm::SmallPtrSet<const Decl *, 4>;
+ struct FunctionData {
+ DeclToAddrMapTy LocalVarData;
+ llvm::Optional<DeclToAddrMapTy> SecondaryLocalVarData = llvm::None;
+ EscapedParamsTy EscapedParameters;
+ llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
+ llvm::SmallVector<llvm::Value *, 4> EscapedVariableLengthDeclsAddrs;
+ const RecordDecl *GlobalRecord = nullptr;
+ llvm::Optional<const RecordDecl *> SecondaryGlobalRecord = llvm::None;
+ llvm::Value *GlobalRecordAddr = nullptr;
+ llvm::Value *IsInSPMDModeFlag = nullptr;
+ std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
+ };
+ /// Maps the function to the list of the globalized variables with their
+ /// addresses.
+ llvm::SmallDenseMap<llvm::Function *, FunctionData> FunctionGlobalizedDecls;
+  /// List of records for the globalized variables in target/teams/distribute
+  /// contexts. Inner records are joined into a single record, and the
+  /// resulting records are joined into a single union. This union (one per
+  /// CU) is the entry point for the static memory management runtime
+  /// functions.
+ struct GlobalPtrSizeRecsTy {
+ llvm::GlobalVariable *UseSharedMemory = nullptr;
+ llvm::GlobalVariable *RecSize = nullptr;
+ llvm::GlobalVariable *Buffer = nullptr;
+ SourceLocation Loc;
+ llvm::SmallVector<const RecordDecl *, 2> Records;
+ unsigned RegionCounter = 0;
+ };
+ llvm::SmallVector<GlobalPtrSizeRecsTy, 8> GlobalizedRecords;
+ llvm::GlobalVariable *KernelTeamsReductionPtr = nullptr;
+  /// List of the records with the list of fields for the reductions across
+  /// the teams. Used to build the intermediate buffer for the fast teams
+  /// reductions. All the records are gathered into a single union type,
+  /// `union.type`.
+ llvm::SmallVector<const RecordDecl *, 4> TeamsReductions;
+  /// Shared pointer to the global memory buffer used for the given kernel.
+ llvm::GlobalVariable *KernelStaticGlobalized = nullptr;
+  /// Pair of the Non-SPMD team and all reduction variables in this team
+  /// region.
+ std::pair<const Decl *, llvm::SmallVector<const ValueDecl *, 4>>
+ TeamAndReductions;
+};
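+// Illustrative sketch (not part of this header): a target-specific runtime
+// only needs to implement the GPU queries declared above, e.g.:
+//
+//   class CGOpenMPRuntimeFooGPU final : public CGOpenMPRuntimeGPU {
+//   public:
+//     explicit CGOpenMPRuntimeFooGPU(CodeGenModule &CGM)
+//         : CGOpenMPRuntimeGPU(CGM) {}
+//     llvm::Value *getGPUWarpSize(CodeGenFunction &CGF) override;
+//     llvm::Value *getGPUThreadID(CodeGenFunction &CGF) override;
+//     llvm::Value *getGPUNumThreads(CodeGenFunction &CGF) override;
+//   };
+//
+// 'CGOpenMPRuntimeFooGPU' is a hypothetical name; the CGOpenMPRuntimeNVPTX
+// changes below follow exactly this pattern.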
+
+} // CodeGen namespace.
+} // clang namespace.
+
+#endif // LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEGPU_H
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index cbd443134e7a..1688d07b90b6 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -7,11 +7,12 @@
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
-// targets.
+// targets, derived from the generalized CGOpenMPRuntimeGPU class.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntimeNVPTX.h"
+#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
@@ -25,5223 +26,31 @@ using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
-namespace {
-enum OpenMPRTLFunctionNVPTX {
- /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
- /// int16_t RequiresOMPRuntime);
- OMPRTL_NVPTX__kmpc_kernel_init,
- /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
- OMPRTL_NVPTX__kmpc_kernel_deinit,
- /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
- /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
- OMPRTL_NVPTX__kmpc_spmd_kernel_init,
- /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
- OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
- /// Call to void __kmpc_kernel_prepare_parallel(void
- /// *outlined_function);
- OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
- /// Call to bool __kmpc_kernel_parallel(void **outlined_function);
- OMPRTL_NVPTX__kmpc_kernel_parallel,
- /// Call to void __kmpc_kernel_end_parallel();
- OMPRTL_NVPTX__kmpc_kernel_end_parallel,
- /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
- /// global_tid);
- OMPRTL_NVPTX__kmpc_serialized_parallel,
- /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
- /// global_tid);
- OMPRTL_NVPTX__kmpc_end_serialized_parallel,
- /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
- /// int16_t lane_offset, int16_t warp_size);
- OMPRTL_NVPTX__kmpc_shuffle_int32,
- /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
- /// int16_t lane_offset, int16_t warp_size);
- OMPRTL_NVPTX__kmpc_shuffle_int64,
- /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
- /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
- /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- /// lane_offset, int16_t shortCircuit),
- /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
- OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2,
- /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
- /// global_tid, void *global_buffer, int32_t num_of_records, void*
- /// reduce_data,
- /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- /// lane_offset, int16_t shortCircuit),
- /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
- /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
- /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
- /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
- /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
- /// *buffer, int idx, void *reduce_data));
- OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2,
- /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
- OMPRTL_NVPTX__kmpc_end_reduce_nowait,
- /// Call to void __kmpc_data_sharing_init_stack();
- OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
- /// Call to void __kmpc_data_sharing_init_stack_spmd();
- OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
- /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
- /// int16_t UseSharedMemory);
- OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
- /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t
- /// UseSharedMemory);
- OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
- /// Call to void __kmpc_data_sharing_pop_stack(void *a);
- OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
- /// Call to void __kmpc_begin_sharing_variables(void ***args,
- /// size_t n_args);
- OMPRTL_NVPTX__kmpc_begin_sharing_variables,
- /// Call to void __kmpc_end_sharing_variables();
- OMPRTL_NVPTX__kmpc_end_sharing_variables,
- /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs)
- OMPRTL_NVPTX__kmpc_get_shared_variables,
- /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
- /// global_tid);
- OMPRTL_NVPTX__kmpc_parallel_level,
- /// Call to int8_t __kmpc_is_spmd_exec_mode();
- OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
- /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
- /// const void *buf, size_t size, int16_t is_shared, const void **res);
- OMPRTL_NVPTX__kmpc_get_team_static_memory,
- /// Call to void __kmpc_restore_team_static_memory(int16_t
- /// isSPMDExecutionMode, int16_t is_shared);
- OMPRTL_NVPTX__kmpc_restore_team_static_memory,
- /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_barrier,
- /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
- /// global_tid);
- OMPRTL__kmpc_barrier_simple_spmd,
- /// Call to int32_t __kmpc_warp_active_thread_mask(void);
- OMPRTL_NVPTX__kmpc_warp_active_thread_mask,
- /// Call to void __kmpc_syncwarp(int32_t Mask);
- OMPRTL_NVPTX__kmpc_syncwarp,
-};
-
-/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
-class NVPTXActionTy final : public PrePostActionTy {
- llvm::FunctionCallee EnterCallee = nullptr;
- ArrayRef<llvm::Value *> EnterArgs;
- llvm::FunctionCallee ExitCallee = nullptr;
- ArrayRef<llvm::Value *> ExitArgs;
- bool Conditional = false;
- llvm::BasicBlock *ContBlock = nullptr;
-
-public:
- NVPTXActionTy(llvm::FunctionCallee EnterCallee,
- ArrayRef<llvm::Value *> EnterArgs,
- llvm::FunctionCallee ExitCallee,
- ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
- : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
- ExitArgs(ExitArgs), Conditional(Conditional) {}
- void Enter(CodeGenFunction &CGF) override {
- llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
- if (Conditional) {
- llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
- auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
- ContBlock = CGF.createBasicBlock("omp_if.end");
- // Generate the branch (If-stmt)
- CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
- CGF.EmitBlock(ThenBlock);
- }
- }
- void Done(CodeGenFunction &CGF) {
- // Emit the rest of blocks/branches
- CGF.EmitBranch(ContBlock);
- CGF.EmitBlock(ContBlock, true);
- }
- void Exit(CodeGenFunction &CGF) override {
- CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
- }
-};
-
-/// A class to track the execution mode when codegening directives within
-/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
-/// to the target region and used by containing directives such as 'parallel'
-/// to emit optimized code.
-class ExecutionRuntimeModesRAII {
-private:
- CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
- CGOpenMPRuntimeNVPTX::EM_Unknown;
- CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
- bool SavedRuntimeMode = false;
- bool *RuntimeMode = nullptr;
-
-public:
- /// Constructor for Non-SPMD mode.
- ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
- : ExecMode(ExecMode) {
- SavedExecMode = ExecMode;
- ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
- }
- /// Constructor for SPMD mode.
- ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
- bool &RuntimeMode, bool FullRuntimeMode)
- : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
- SavedExecMode = ExecMode;
- SavedRuntimeMode = RuntimeMode;
- ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
- RuntimeMode = FullRuntimeMode;
- }
- ~ExecutionRuntimeModesRAII() {
- ExecMode = SavedExecMode;
- if (RuntimeMode)
- *RuntimeMode = SavedRuntimeMode;
- }
-};
-
-/// GPU Configuration: This information can be derived from cuda registers,
-/// however, providing compile time constants helps generate more efficient
-/// code. For all practical purposes this is fine because the configuration
-/// is the same for all known NVPTX architectures.
-enum MachineConfiguration : unsigned {
- WarpSize = 32,
- /// Number of bits required to represent a lane identifier, which is
- /// computed as log_2(WarpSize).
- LaneIDBits = 5,
- LaneIDMask = WarpSize - 1,
-
- /// Global memory alignment for performance.
- GlobalMemoryAlignment = 128,
-
- /// Maximal size of the shared memory buffer.
- SharedMemorySize = 128,
-};
-
-static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
- RefExpr = RefExpr->IgnoreParens();
- if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
- const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
- Base = TempASE->getBase()->IgnoreParenImpCasts();
- RefExpr = Base;
- } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
- const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
- Base = TempOASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
- Base = TempASE->getBase()->IgnoreParenImpCasts();
- RefExpr = Base;
- }
- RefExpr = RefExpr->IgnoreParenImpCasts();
- if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
- return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
- const auto *ME = cast<MemberExpr>(RefExpr);
- return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
-}
-
-
-static RecordDecl *buildRecordForGlobalizedVars(
- ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
- ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
- llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &MappedDeclsFields, int BufSize) {
- using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
- if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
- return nullptr;
- SmallVector<VarsDataTy, 4> GlobalizedVars;
- for (const ValueDecl *D : EscapedDecls)
- GlobalizedVars.emplace_back(
- CharUnits::fromQuantity(std::max(
- C.getDeclAlign(D).getQuantity(),
- static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
- D);
- for (const ValueDecl *D : EscapedDeclsForTeams)
- GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
- llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
- return L.first > R.first;
- });
-
- // Build struct _globalized_locals_ty {
- // /* globalized vars */[WarSize] align (max(decl_align,
- // GlobalMemoryAlignment))
- // /* globalized vars */ for EscapedDeclsForTeams
- // };
- RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
- GlobalizedRD->startDefinition();
- llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
- EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
- for (const auto &Pair : GlobalizedVars) {
- const ValueDecl *VD = Pair.second;
- QualType Type = VD->getType();
- if (Type->isLValueReferenceType())
- Type = C.getPointerType(Type.getNonReferenceType());
- else
- Type = Type.getNonReferenceType();
- SourceLocation Loc = VD->getLocation();
- FieldDecl *Field;
- if (SingleEscaped.count(VD)) {
- Field = FieldDecl::Create(
- C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
- C.getTrivialTypeSourceInfo(Type, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- if (VD->hasAttrs()) {
- for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
- E(VD->getAttrs().end());
- I != E; ++I)
- Field->addAttr(*I);
- }
- } else {
- llvm::APInt ArraySize(32, BufSize);
- Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
- 0);
- Field = FieldDecl::Create(
- C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
- C.getTrivialTypeSourceInfo(Type, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
- static_cast<CharUnits::QuantityType>(
- GlobalMemoryAlignment)));
- Field->addAttr(AlignedAttr::CreateImplicit(
- C, /*IsAlignmentExpr=*/true,
- IntegerLiteral::Create(C, Align,
- C.getIntTypeForBitwidth(32, /*Signed=*/0),
- SourceLocation()),
- {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
- }
- GlobalizedRD->addDecl(Field);
- MappedDeclsFields.try_emplace(VD, Field);
- }
- GlobalizedRD->completeDefinition();
- return GlobalizedRD;
+CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
+ : CGOpenMPRuntimeGPU(CGM) {
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ llvm_unreachable("OpenMP NVPTX can only handle device code.");
}
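+// Illustrative note: OpenMPIsDevice is set during the device-side pass of an
+// offloading compilation, e.g. one driven with
+//   clang -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda foo.c
+// so this constructor is only ever reached when emitting device code.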
-/// Get the list of variables that can escape their declaration context.
-class CheckVarsEscapingDeclContext final
- : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
- CodeGenFunction &CGF;
- llvm::SetVector<const ValueDecl *> EscapedDecls;
- llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
- llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
- RecordDecl *GlobalizedRD = nullptr;
- llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
- bool AllEscaped = false;
- bool IsForCombinedParallelRegion = false;
-
- void markAsEscaped(const ValueDecl *VD) {
- // Do not globalize declare target variables.
- if (!isa<VarDecl>(VD) ||
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
- return;
- VD = cast<ValueDecl>(VD->getCanonicalDecl());
- // Use user-specified allocation.
- if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
- return;
- // Variables captured by value must be globalized.
- if (auto *CSI = CGF.CapturedStmtInfo) {
- if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
- // Check if need to capture the variable that was already captured by
- // value in the outer region.
- if (!IsForCombinedParallelRegion) {
- if (!FD->hasAttrs())
- return;
- const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
- if (!Attr)
- return;
- if (((Attr->getCaptureKind() != OMPC_map) &&
- !isOpenMPPrivate(Attr->getCaptureKind())) ||
- ((Attr->getCaptureKind() == OMPC_map) &&
- !FD->getType()->isAnyPointerType()))
- return;
- }
- if (!FD->getType()->isReferenceType()) {
- assert(!VD->getType()->isVariablyModifiedType() &&
- "Parameter captured by value with variably modified type");
- EscapedParameters.insert(VD);
- } else if (!IsForCombinedParallelRegion) {
- return;
- }
- }
- }
- if ((!CGF.CapturedStmtInfo ||
- (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
- VD->getType()->isReferenceType())
- // Do not globalize variables with reference type.
- return;
- if (VD->getType()->isVariablyModifiedType())
- EscapedVariableLengthDecls.insert(VD);
- else
- EscapedDecls.insert(VD);
- }
-
- void VisitValueDecl(const ValueDecl *VD) {
- if (VD->getType()->isLValueReferenceType())
- markAsEscaped(VD);
- if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
- if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
- const bool SavedAllEscaped = AllEscaped;
- AllEscaped = VD->getType()->isLValueReferenceType();
- Visit(VarD->getInit());
- AllEscaped = SavedAllEscaped;
- }
- }
- }
- void VisitOpenMPCapturedStmt(const CapturedStmt *S,
- ArrayRef<OMPClause *> Clauses,
- bool IsCombinedParallelRegion) {
- if (!S)
- return;
- for (const CapturedStmt::Capture &C : S->captures()) {
- if (C.capturesVariable() && !C.capturesVariableByCopy()) {
- const ValueDecl *VD = C.getCapturedVar();
- bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
- if (IsCombinedParallelRegion) {
- // Check if the variable is privatized in the combined construct and
- // those private copies must be shared in the inner parallel
- // directive.
- IsForCombinedParallelRegion = false;
- for (const OMPClause *C : Clauses) {
- if (!isOpenMPPrivate(C->getClauseKind()) ||
- C->getClauseKind() == OMPC_reduction ||
- C->getClauseKind() == OMPC_linear ||
- C->getClauseKind() == OMPC_private)
- continue;
- ArrayRef<const Expr *> Vars;
- if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
- Vars = PC->getVarRefs();
- else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
- Vars = PC->getVarRefs();
- else
- llvm_unreachable("Unexpected clause.");
- for (const auto *E : Vars) {
- const Decl *D =
- cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
- if (D == VD->getCanonicalDecl()) {
- IsForCombinedParallelRegion = true;
- break;
- }
- }
- if (IsForCombinedParallelRegion)
- break;
- }
- }
- markAsEscaped(VD);
- if (isa<OMPCapturedExprDecl>(VD))
- VisitValueDecl(VD);
- IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
- }
- }
- }
-
- void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
- assert(!GlobalizedRD &&
- "Record for globalized variables is built already.");
- ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
- if (IsInTTDRegion)
- EscapedDeclsForTeams = EscapedDecls.getArrayRef();
- else
- EscapedDeclsForParallel = EscapedDecls.getArrayRef();
- GlobalizedRD = ::buildRecordForGlobalizedVars(
- CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
- MappedDeclsFields, WarpSize);
- }
-
-public:
- CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
- ArrayRef<const ValueDecl *> TeamsReductions)
- : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
- }
- virtual ~CheckVarsEscapingDeclContext() = default;
- void VisitDeclStmt(const DeclStmt *S) {
- if (!S)
- return;
- for (const Decl *D : S->decls())
- if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
- VisitValueDecl(VD);
- }
- void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
- if (!D)
- return;
- if (!D->hasAssociatedStmt())
- return;
- if (const auto *S =
- dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
- // Do not analyze directives that do not actually require capturing,
- // like `omp for` or `omp simd` directives.
- llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
- getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
- if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
- VisitStmt(S->getCapturedStmt());
- return;
- }
- VisitOpenMPCapturedStmt(
- S, D->clauses(),
- CaptureRegions.back() == OMPD_parallel &&
- isOpenMPDistributeDirective(D->getDirectiveKind()));
- }
- }
- void VisitCapturedStmt(const CapturedStmt *S) {
- if (!S)
- return;
- for (const CapturedStmt::Capture &C : S->captures()) {
- if (C.capturesVariable() && !C.capturesVariableByCopy()) {
- const ValueDecl *VD = C.getCapturedVar();
- markAsEscaped(VD);
- if (isa<OMPCapturedExprDecl>(VD))
- VisitValueDecl(VD);
- }
- }
- }
- void VisitLambdaExpr(const LambdaExpr *E) {
- if (!E)
- return;
- for (const LambdaCapture &C : E->captures()) {
- if (C.capturesVariable()) {
- if (C.getCaptureKind() == LCK_ByRef) {
- const ValueDecl *VD = C.getCapturedVar();
- markAsEscaped(VD);
- if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
- VisitValueDecl(VD);
- }
- }
- }
- }
- void VisitBlockExpr(const BlockExpr *E) {
- if (!E)
- return;
- for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
- if (C.isByRef()) {
- const VarDecl *VD = C.getVariable();
- markAsEscaped(VD);
- if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
- VisitValueDecl(VD);
- }
- }
- }
- void VisitCallExpr(const CallExpr *E) {
- if (!E)
- return;
- for (const Expr *Arg : E->arguments()) {
- if (!Arg)
- continue;
- if (Arg->isLValue()) {
- const bool SavedAllEscaped = AllEscaped;
- AllEscaped = true;
- Visit(Arg);
- AllEscaped = SavedAllEscaped;
- } else {
- Visit(Arg);
- }
- }
- Visit(E->getCallee());
- }
- void VisitDeclRefExpr(const DeclRefExpr *E) {
- if (!E)
- return;
- const ValueDecl *VD = E->getDecl();
- if (AllEscaped)
- markAsEscaped(VD);
- if (isa<OMPCapturedExprDecl>(VD))
- VisitValueDecl(VD);
- else if (const auto *VarD = dyn_cast<VarDecl>(VD))
- if (VarD->isInitCapture())
- VisitValueDecl(VD);
- }
- void VisitUnaryOperator(const UnaryOperator *E) {
- if (!E)
- return;
- if (E->getOpcode() == UO_AddrOf) {
- const bool SavedAllEscaped = AllEscaped;
- AllEscaped = true;
- Visit(E->getSubExpr());
- AllEscaped = SavedAllEscaped;
- } else {
- Visit(E->getSubExpr());
- }
- }
- void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
- if (!E)
- return;
- if (E->getCastKind() == CK_ArrayToPointerDecay) {
- const bool SavedAllEscaped = AllEscaped;
- AllEscaped = true;
- Visit(E->getSubExpr());
- AllEscaped = SavedAllEscaped;
- } else {
- Visit(E->getSubExpr());
- }
- }
- void VisitExpr(const Expr *E) {
- if (!E)
- return;
- bool SavedAllEscaped = AllEscaped;
- if (!E->isLValue())
- AllEscaped = false;
- for (const Stmt *Child : E->children())
- if (Child)
- Visit(Child);
- AllEscaped = SavedAllEscaped;
- }
- void VisitStmt(const Stmt *S) {
- if (!S)
- return;
- for (const Stmt *Child : S->children())
- if (Child)
- Visit(Child);
- }
-
- /// Returns the record that handles all the escaped local variables and used
- /// instead of their original storage.
- const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
- if (!GlobalizedRD)
- buildRecordForGlobalizedVars(IsInTTDRegion);
- return GlobalizedRD;
- }
-
- /// Returns the field in the globalized record for the escaped variable.
- const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
- assert(GlobalizedRD &&
- "Record for globalized variables must be generated already.");
- auto I = MappedDeclsFields.find(VD);
- if (I == MappedDeclsFields.end())
- return nullptr;
- return I->getSecond();
- }
-
- /// Returns the list of the escaped local variables/parameters.
- ArrayRef<const ValueDecl *> getEscapedDecls() const {
- return EscapedDecls.getArrayRef();
- }
-
- /// Checks if the escaped local variable is actually a parameter passed by
- /// value.
- const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
- return EscapedParameters;
- }
-
- /// Returns the list of the escaped variables with the variably modified
- /// types.
- ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
- return EscapedVariableLengthDecls.getArrayRef();
- }
-};
-} // anonymous namespace
-
-/// Get the GPU warp size.
-static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
+llvm::Value *CGOpenMPRuntimeNVPTX::getGPUWarpSize(CodeGenFunction &CGF) {
return CGF.EmitRuntimeCall(
llvm::Intrinsic::getDeclaration(
&CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
"nvptx_warp_size");
}
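+// Illustrative note: the call above lowers to the IR intrinsic
+//   %ws = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
+// which resolves to the warp size (32 on all current NVIDIA GPUs).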
-/// Get the id of the current thread on the GPU.
-static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
- return CGF.EmitRuntimeCall(
- llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
- "nvptx_tid");
-}
-
-/// Get the id of the warp in the block.
-/// We assume that the warp size is 32, which is always the case
-/// on the NVPTX device, to generate more efficient code.
-static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id");
-}
-
-/// Get the id of the current lane in the Warp.
-/// We assume that the warp size is 32, which is always the case
-/// on the NVPTX device, to generate more efficient code.
-static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask),
- "nvptx_lane_id");
-}
-
-/// Get the maximum number of threads in a block of the GPU.
-static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
- return CGF.EmitRuntimeCall(
- llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
- "nvptx_num_threads");
-}
-
-/// Get the value of the thread_limit clause in the teams directive.
-/// For the 'generic' execution mode, the runtime encodes thread_limit in
-/// the launch parameters, always starting thread_limit+warpSize threads per
-/// CTA. The threads in the last warp are reserved for master execution.
-/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
-static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
- bool IsInSPMDExecutionMode = false) {
- CGBuilderTy &Bld = CGF.Builder;
- return IsInSPMDExecutionMode
- ? getNVPTXNumThreads(CGF)
- : Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
- "thread_limit");
-}
-
-/// Get the thread id of the OMP master thread.
-/// The master thread id is the first thread (lane) of the last warp in the
-/// GPU block. Warp size is assumed to be some power of 2.
-/// Thread id is 0 indexed.
-/// E.g: If NumThreads is 33, master id is 32.
-/// If NumThreads is 64, master id is 32.
-/// If NumThreads is 1024, master id is 992.
-static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Value *NumThreads = getNVPTXNumThreads(CGF);
-
- // We assume that the warp size is a power of 2.
- llvm::Value *Mask = Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));
-
- return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
- Bld.CreateNot(Mask), "master_tid");
-}
-
-CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
- CodeGenModule &CGM, SourceLocation Loc)
- : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
- Loc(Loc) {
- createWorkerFunction(CGM);
-}
-
-void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
- CodeGenModule &CGM) {
- // Create an worker function with no arguments.
-
- WorkerFn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- /*placeholder=*/"_worker", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
- WorkerFn->setDoesNotRecurse();
-}
-
-CGOpenMPRuntimeNVPTX::ExecutionMode
-CGOpenMPRuntimeNVPTX::getExecutionMode() const {
- return CurrentExecutionMode;
-}
-
-static CGOpenMPRuntimeNVPTX::DataSharingMode
-getDataSharingMode(CodeGenModule &CGM) {
- return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeNVPTX::CUDA
- : CGOpenMPRuntimeNVPTX::Generic;
-}
-
-/// Check for inner (nested) SPMD construct, if any
-static bool hasNestedSPMDDirective(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
- const auto *CS = D.getInnermostCapturedStmt();
- const auto *Body =
- CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
-
- if (const auto *NestedDir =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
- switch (D.getDirectiveKind()) {
- case OMPD_target:
- if (isOpenMPParallelDirective(DKind))
- return true;
- if (DKind == OMPD_teams) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPParallelDirective(DKind))
- return true;
- }
- }
- return false;
- case OMPD_target_teams:
- return isOpenMPParallelDirective(DKind);
- case OMPD_target_simd:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- default:
- llvm_unreachable("Unexpected directive.");
- }
- }
-
- return false;
-}
-
-static bool supportsSPMDExecutionMode(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- switch (DirectiveKind) {
- case OMPD_target:
- case OMPD_target_teams:
- return hasNestedSPMDDirective(Ctx, D);
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_simd:
- case OMPD_target_teams_distribute_simd:
- return true;
- case OMPD_target_teams_distribute:
- return false;
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- default:
- break;
- }
- llvm_unreachable(
- "Unknown programming model for OpenMP directive on NVPTX target.");
-}
-
-/// Check if the directive is loops based and has schedule clause at all or has
-/// static scheduling.
-static bool hasStaticScheduling(const OMPExecutableDirective &D) {
- assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
- isOpenMPLoopDirective(D.getDirectiveKind()) &&
- "Expected loop-based directive.");
- return !D.hasClausesOfKind<OMPOrderedClause>() &&
- (!D.hasClausesOfKind<OMPScheduleClause>() ||
- llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
- [](const OMPScheduleClause *C) {
- return C->getScheduleKind() == OMPC_SCHEDULE_static;
- }));
-}
-
-/// Check for inner (nested) lightweight runtime construct, if any
-static bool hasNestedLightweightDirective(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
- assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
- const auto *CS = D.getInnermostCapturedStmt();
- const auto *Body =
- CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
-
- if (const auto *NestedDir =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
- switch (D.getDirectiveKind()) {
- case OMPD_target:
- if (isOpenMPParallelDirective(DKind) &&
- isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
- hasStaticScheduling(*NestedDir))
- return true;
- if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
- return true;
- if (DKind == OMPD_parallel) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- }
- } else if (DKind == OMPD_teams) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPParallelDirective(DKind) &&
- isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- if (DKind == OMPD_parallel) {
- Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- }
- }
- }
- }
- return false;
- case OMPD_target_teams:
- if (isOpenMPParallelDirective(DKind) &&
- isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
- hasStaticScheduling(*NestedDir))
- return true;
- if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
- return true;
- if (DKind == OMPD_parallel) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- }
- }
- return false;
- case OMPD_target_parallel:
- if (DKind == OMPD_simd)
- return true;
- return isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
- case OMPD_target_teams_distribute:
- case OMPD_target_simd:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- default:
- llvm_unreachable("Unexpected directive.");
- }
- }
-
- return false;
-}
-
-/// Checks if the construct supports lightweight runtime. It must be SPMD
-/// construct + inner loop-based construct with static scheduling.
-static bool supportsLightweightRuntime(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
- if (!supportsSPMDExecutionMode(Ctx, D))
- return false;
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- switch (DirectiveKind) {
- case OMPD_target:
- case OMPD_target_teams:
- case OMPD_target_parallel:
- return hasNestedLightweightDirective(Ctx, D);
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- // (Last|First)-privates must be shared in the parallel region.
- return hasStaticScheduling(D);
- case OMPD_target_simd:
- case OMPD_target_teams_distribute_simd:
- return true;
- case OMPD_target_teams_distribute:
- return false;
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- default:
- break;
- }
- llvm_unreachable(
- "Unknown programming model for OpenMP directive on NVPTX target.");
-}
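// For illustration (hypothetical user code, not from this patch): with the
// default 'static' schedule the first directive below passes
// hasStaticScheduling and so qualifies for the lightweight runtime, while an
// explicit schedule(dynamic) forces the full runtime.
//
//   #pragma omp target teams distribute parallel for
//   for (int I = 0; I < N; ++I) { /* lightweight runtime */ }
//
//   #pragma omp target teams distribute parallel for schedule(dynamic)
//   for (int I = 0; I < N; ++I) { /* full runtime required */ }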
-
-void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
- StringRef ParentName,
- llvm::Function *&OutlinedFn,
- llvm::Constant *&OutlinedFnID,
- bool IsOffloadEntry,
- const RegionCodeGenTy &CodeGen) {
- ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
- EntryFunctionState EST;
- WorkerFunctionState WST(CGM, D.getBeginLoc());
- Work.clear();
- WrapperFunctionsMap.clear();
-
- // Emit target region as a standalone region.
- class NVPTXPrePostActionTy : public PrePostActionTy {
- CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
- CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
-
- public:
- NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
- CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
- : EST(EST), WST(WST) {}
- void Enter(CodeGenFunction &CGF) override {
- auto &RT =
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
- RT.emitNonSPMDEntryHeader(CGF, EST, WST);
- // Skip target region initialization.
- RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
- }
- void Exit(CodeGenFunction &CGF) override {
- auto &RT =
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
- RT.clearLocThreadIdInsertPt(CGF);
- RT.emitNonSPMDEntryFooter(CGF, EST);
- }
- } Action(EST, WST);
- CodeGen.setAction(Action);
- IsInTTDRegion = true;
- // Reserve space for the globalized memory.
- GlobalizedRecords.emplace_back();
- if (!KernelStaticGlobalized) {
- KernelStaticGlobalized = new llvm::GlobalVariable(
- CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
- "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
- llvm::GlobalValue::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
- }
- emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
- IsOffloadEntry, CodeGen);
- IsInTTDRegion = false;
-
- // Now change the name of the worker function to correspond to this target
- // region's entry function.
- WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
-
- // Create the worker function.
- emitWorkerFunction(WST);
-}
-
-// Set up NVPTX threads for the master-worker OpenMP scheme.
-void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
- EntryFunctionState &EST,
- WorkerFunctionState &WST) {
- CGBuilderTy &Bld = CGF.Builder;
-
- llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
- llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
- llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- llvm::Value *IsWorker =
- Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
- Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
-
- CGF.EmitBlock(WorkerBB);
- emitCall(CGF, WST.Loc, WST.WorkerFn);
- CGF.EmitBranch(EST.ExitBB);
-
- CGF.EmitBlock(MasterCheckBB);
- llvm::Value *IsMaster =
- Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
- Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
-
- CGF.EmitBlock(MasterBB);
- IsInTargetMasterThreadRegion = true;
- // SEQUENTIAL (MASTER) REGION START
- // First action in sequential region:
- // Initialize the state of the OpenMP runtime library on the GPU.
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {getThreadLimit(CGF),
- Bld.getInt16(/*RequiresOMPRuntime=*/1)};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
-
- // For data sharing, we need to initialize the stack.
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_init_stack));
-
- emitGenericVarsProlog(CGF, WST.Loc);
-}
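// In outline, the header above produces the following entry control flow
// (pseudo-IR sketch; block names match the createBasicBlock calls, and
// <kernel> stands for the target region's entry function):
//
//   entry:        %is.worker = icmp ult %tid, %thread_limit
//                 br %is.worker, label %.worker, label %.mastercheck
//   .worker:      call void @<kernel>_worker()   ; spin in the work loop
//                 br label %.exit
//   .mastercheck: %is.master = icmp eq %tid, %master_tid
//                 br %is.master, label %.master, label %.exit
//   .master:      call void @__kmpc_kernel_init(%thread_limit, i16 1)
//                 call void @__kmpc_data_sharing_init_stack()
//                 ; sequential master region follows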
-
-void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
- EntryFunctionState &EST) {
- IsInTargetMasterThreadRegion = false;
- if (!CGF.HaveInsertPoint())
- return;
-
- emitGenericVarsEpilog(CGF);
-
- if (!EST.ExitBB)
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
- CGF.EmitBranch(TerminateBB);
-
- CGF.EmitBlock(TerminateBB);
- // Signal termination condition.
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
- // Barrier to terminate worker threads.
- syncCTAThreads(CGF);
- // Master thread jumps to exit point.
- CGF.EmitBranch(EST.ExitBB);
-
- CGF.EmitBlock(EST.ExitBB);
- EST.ExitBB = nullptr;
-}
-
-void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
- StringRef ParentName,
- llvm::Function *&OutlinedFn,
- llvm::Constant *&OutlinedFnID,
- bool IsOffloadEntry,
- const RegionCodeGenTy &CodeGen) {
- ExecutionRuntimeModesRAII ModeRAII(
- CurrentExecutionMode, RequiresFullRuntime,
- CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
- !supportsLightweightRuntime(CGM.getContext(), D));
- EntryFunctionState EST;
-
- // Emit target region as a standalone region.
- class NVPTXPrePostActionTy : public PrePostActionTy {
- CGOpenMPRuntimeNVPTX &RT;
- CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
- const OMPExecutableDirective &D;
-
- public:
- NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
- CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
- const OMPExecutableDirective &D)
- : RT(RT), EST(EST), D(D) {}
- void Enter(CodeGenFunction &CGF) override {
- RT.emitSPMDEntryHeader(CGF, EST, D);
- // Skip target region initialization.
- RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
- }
- void Exit(CodeGenFunction &CGF) override {
- RT.clearLocThreadIdInsertPt(CGF);
- RT.emitSPMDEntryFooter(CGF, EST);
- }
- } Action(*this, EST, D);
- CodeGen.setAction(Action);
- IsInTTDRegion = true;
- // Reserve space for the globalized memory.
- GlobalizedRecords.emplace_back();
- if (!KernelStaticGlobalized) {
- KernelStaticGlobalized = new llvm::GlobalVariable(
- CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
- "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
- llvm::GlobalValue::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
- }
- emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
- IsOffloadEntry, CodeGen);
- IsInTTDRegion = false;
-}
-
-void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
- CodeGenFunction &CGF, EntryFunctionState &EST,
- const OMPExecutableDirective &D) {
- CGBuilderTy &Bld = CGF.Builder;
-
- // Set up the basic blocks in the entry function.
- llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
- /*RequiresOMPRuntime=*/
- Bld.getInt16(RequiresFullRuntime ? 1 : 0),
- /*RequiresDataSharing=*/Bld.getInt16(0)};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
-
- if (RequiresFullRuntime) {
- // For data sharing, we need to initialize the stack.
- CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
- }
-
- CGF.EmitBranch(ExecuteBB);
-
- CGF.EmitBlock(ExecuteBB);
-
- IsInTargetMasterThreadRegion = true;
-}
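// The SPMD entry is much simpler: every thread executes the region, so there
// is no worker/master split, only an init call before the body (pseudo-IR
// sketch):
//
//   entry:    call void @__kmpc_spmd_kernel_init(%thread_limit,
//                 i16 %requires_full_runtime, i16 0)
//             ; data-sharing stack is initialized only with the full runtime
//             br label %.execute
//   .execute: ; region body, all threads active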
-
-void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
- EntryFunctionState &EST) {
- IsInTargetMasterThreadRegion = false;
- if (!CGF.HaveInsertPoint())
- return;
-
- if (!EST.ExitBB)
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
- CGF.EmitBranch(OMPDeInitBB);
-
- CGF.EmitBlock(OMPDeInitBB);
- // Deinitialize the OMP state in the runtime; called by all active threads.
- llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
- CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
- CGF.EmitBranch(EST.ExitBB);
-
- CGF.EmitBlock(EST.ExitBB);
- EST.ExitBB = nullptr;
-}
-
-// Create a unique global variable to indicate the execution mode of this
-// target region. The execution mode is either 'generic' or 'spmd', depending
-// on the target directive. This variable is picked up by the offload library
-// to set up the device appropriately before kernel launch. If the execution
-// mode is 'generic', the runtime reserves one warp for the master; otherwise,
-// all warps participate in parallel work.
-static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
- bool Mode) {
- auto *GVMode =
- new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::WeakAnyLinkage,
- llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
- Twine(Name, "_exec_mode"));
- CGM.addCompilerUsedGlobal(GVMode);
-}
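// For a kernel whose outlined function is named, say,
// __omp_offloading_XY_foo_l1 (name shape illustrative), this emits:
//
//   @__omp_offloading_XY_foo_l1_exec_mode = weak constant i8 0
//
// Note the inversion: Mode == true (SPMD) stores 0 and Mode == false
// (generic) stores 1, per the ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1)
// argument above.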
-
-void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
- ASTContext &Ctx = CGM.getContext();
-
- CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
- CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
- WST.Loc, WST.Loc);
- emitWorkerLoop(CGF, WST);
- CGF.FinishFunction();
-}
-
-void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
- WorkerFunctionState &WST) {
- //
- // The workers enter this loop and wait for parallel work from the master.
- // When the master encounters a parallel region, it sets up the work and
- // variable arguments and wakes up the workers. Each worker first checks
- // whether it is needed for the parallel region, i.e., whether it falls
- // within the number of requested parallel threads. The activated workers
- // load the variable arguments and execute the parallel work.
- //
-
+llvm::Value *CGOpenMPRuntimeNVPTX::getGPUThreadID(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
-
- llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
- llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
- llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
- llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
- llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
-
- CGF.EmitBranch(AwaitBB);
-
- // Workers wait for work from master.
- CGF.EmitBlock(AwaitBB);
- // Wait for parallel work
- syncCTAThreads(CGF);
-
- Address WorkFn =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
- Address ExecStatus =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
- CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
- CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
-
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {WorkFn.getPointer()};
- llvm::Value *Ret = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
- Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
-
- // On termination condition (workid == 0), exit loop.
- llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
- llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
- Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
-
- // Activate requested workers.
- CGF.EmitBlock(SelectWorkersBB);
- llvm::Value *IsActive =
- Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
- Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
-
- // Signal start of parallel region.
- CGF.EmitBlock(ExecuteBB);
- // Skip initialization.
- setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
-
- // Process work items: outlined parallel functions.
- for (llvm::Function *W : Work) {
- // Try to match this outlined function.
- llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
-
- llvm::Value *WorkFnMatch =
- Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
-
- llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
- llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
- Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
-
- // Execute this outlined function.
- CGF.EmitBlock(ExecuteFNBB);
-
- // Insert call to work function via shared wrapper. The shared
- // wrapper takes two arguments:
- // - the parallelism level;
- // - the thread ID;
- emitCall(CGF, WST.Loc, W,
- {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
-
- // Go to end of parallel region.
- CGF.EmitBranch(TerminateBB);
-
- CGF.EmitBlock(CheckNextBB);
- }
- // Default case: call the outlined function through a pointer, in case the
- // target region makes a declare-target call that may contain an orphaned
- // parallel directive.
- auto *ParallelFnTy =
- llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
- /*isVarArg=*/false);
- llvm::Value *WorkFnCast =
- Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
- // Insert call to work function via shared wrapper. The shared
- // wrapper takes two arguments:
- // - the parallelism level;
- // - the thread ID;
- emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
- {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
- // Go to end of parallel region.
- CGF.EmitBranch(TerminateBB);
-
- // Signal end of parallel region.
- CGF.EmitBlock(TerminateBB);
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
- llvm::None);
- CGF.EmitBranch(BarrierBB);
-
- // All active and inactive workers wait at a barrier after parallel region.
- CGF.EmitBlock(BarrierBB);
- // Barrier after parallel region.
- syncCTAThreads(CGF);
- CGF.EmitBranch(AwaitBB);
-
- // Exit target region.
- CGF.EmitBlock(ExitBB);
- // Skip initialization.
- clearLocThreadIdInsertPt(CGF);
-}
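// Per worker thread, the loop above amounts to this state machine
// (C++-flavored pseudocode of the emitted blocks; Dispatch stands in for the
// match-or-indirect-call sequence over the Work list):
//
//   for (;;) {
//     syncCTAThreads();                       // .await.work: wait for master
//     bool IsActive = __kmpc_kernel_parallel(&WorkFn);
//     if (!WorkFn)                            // null work fn: kernel exiting
//       break;
//     if (IsActive) {                         // within requested threads
//       Dispatch(WorkFn, /*ParallelLevel=*/0, ThreadID);
//       __kmpc_kernel_end_parallel();
//     }
//     syncCTAThreads();                       // .barrier.parallel
//   }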
-
-/// Returns the specified OpenMP runtime function for the current OpenMP
-/// implementation. Specialized for the NVPTX device.
-/// \param Function OpenMP runtime function.
-/// \return Specified function.
-llvm::FunctionCallee
-CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
- llvm::FunctionCallee RTLFn = nullptr;
- switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
- case OMPRTL_NVPTX__kmpc_kernel_init: {
- // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
- // RequiresOMPRuntime);
- llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
- break;
- }
- case OMPRTL_NVPTX__kmpc_kernel_deinit: {
- // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
- llvm::Type *TypeParams[] = {CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
- break;
- }
- case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
- // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
- // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
- llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
- break;
- }
- case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
- // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
- llvm::Type *TypeParams[] = {CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
- break;
- }
- case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
- /// Build void __kmpc_kernel_prepare_parallel(
- /// void *outlined_function);
- llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
- break;
- }
- case OMPRTL_NVPTX__kmpc_kernel_parallel: {
- /// Build bool __kmpc_kernel_parallel(void **outlined_function);
- llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
- llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
- auto *FnTy =
- llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
- break;
- }
- case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
- /// Build void __kmpc_kernel_end_parallel();
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
- break;
- }
- case OMPRTL_NVPTX__kmpc_serialized_parallel: {
- // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
- break;
- }
- case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
- // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
- break;
- }
- case OMPRTL_NVPTX__kmpc_shuffle_int32: {
- // Build int32_t __kmpc_shuffle_int32(int32_t element,
- // int16_t lane_offset, int16_t warp_size);
- llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
- break;
- }
- case OMPRTL_NVPTX__kmpc_shuffle_int64: {
- // Build int64_t __kmpc_shuffle_int64(int64_t element,
- // int16_t lane_offset, int16_t warp_size);
- llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
- break;
- }
- case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
- // Build int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
- // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
- // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
- // lane_id, int16_t lane_offset, int16_t AlgorithmVersion), void
- // (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
- llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
- CGM.Int16Ty, CGM.Int16Ty};
- auto *ShuffleReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
- auto *InterWarpCopyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
- CGM.Int32Ty,
- CGM.Int32Ty,
- CGM.SizeTy,
- CGM.VoidPtrTy,
- ShuffleReduceFnTy->getPointerTo(),
- InterWarpCopyFnTy->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
- break;
- }
- case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
- // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
- break;
- }
- case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
- // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
- // global_tid, void *global_buffer, int32_t num_of_records, void*
- // reduce_data,
- // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- // lane_offset, int16_t shortCircuit),
- // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
- // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
- // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
- // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
- // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
- // *buffer, int idx, void *reduce_data));
- llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
- CGM.Int16Ty, CGM.Int16Ty};
- auto *ShuffleReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
- auto *InterWarpCopyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
- /*isVarArg=*/false);
- llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
- CGM.VoidPtrTy};
- auto *GlobalListFnTy =
- llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
- CGM.Int32Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrTy,
- ShuffleReduceFnTy->getPointerTo(),
- InterWarpCopyFnTy->getPointerTo(),
- GlobalListFnTy->getPointerTo(),
- GlobalListFnTy->getPointerTo(),
- GlobalListFnTy->getPointerTo(),
- GlobalListFnTy->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
- break;
- }
- case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
- /// Build void __kmpc_data_sharing_init_stack();
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
- break;
- }
- case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
- /// Build void __kmpc_data_sharing_init_stack_spmd();
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
- break;
- }
- case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
- // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
- // int16_t UseSharedMemory);
- llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
- break;
- }
- case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
- // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
- // UseSharedMemory);
- llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
- break;
- }
- case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
- // Build void __kmpc_data_sharing_pop_stack(void *a);
- llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy,
- /*Name=*/"__kmpc_data_sharing_pop_stack");
- break;
- }
- case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
- /// Build void __kmpc_begin_sharing_variables(void ***args,
- /// size_t n_args);
- llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
- break;
- }
- case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
- /// Build void __kmpc_end_sharing_variables();
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
- break;
- }
- case OMPRTL_NVPTX__kmpc_get_shared_variables: {
- /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
- llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
- break;
- }
- case OMPRTL_NVPTX__kmpc_parallel_level: {
- // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
- break;
- }
- case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
- // Build int8_t __kmpc_is_spmd_exec_mode();
- auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
- break;
- }
- case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
- // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
- // const void *buf, size_t size, int16_t is_shared, const void **res);
- llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
- CGM.Int16Ty, CGM.VoidPtrPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
- break;
- }
- case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
- // Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
- // int16_t is_shared);
- llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
- break;
- }
- case OMPRTL__kmpc_barrier: {
- // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn =
- CGM.CreateConvergentRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
- break;
- }
- case OMPRTL__kmpc_barrier_simple_spmd: {
- // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateConvergentRuntimeFunction(
- FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
- break;
- }
- case OMPRTL_NVPTX__kmpc_warp_active_thread_mask: {
- // Build int32_t __kmpc_warp_active_thread_mask(void);
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, llvm::None, /*isVarArg=*/false);
- RTLFn = CGM.CreateConvergentRuntimeFunction(
- FnTy, "__kmpc_warp_active_thread_mask");
- break;
- }
- case OMPRTL_NVPTX__kmpc_syncwarp: {
- // Build void __kmpc_syncwarp(kmp_int32 Mask);
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.Int32Ty, /*isVarArg=*/false);
- RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_syncwarp");
- break;
- }
- }
- return RTLFn;
-}
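// Typical usage, as seen throughout this file: fetch the callee and emit a
// call with the matching argument list. For example, the non-SPMD entry
// header above does:
//
//   llvm::Value *Args[] = {getThreadLimit(CGF),
//                          Bld.getInt16(/*RequiresOMPRuntime=*/1)};
//   CGF.EmitRuntimeCall(
//       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);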
-
-void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
- llvm::Constant *Addr,
- uint64_t Size, int32_t,
- llvm::GlobalValue::LinkageTypes) {
- // TODO: Add support for global variables on the device after declare target
- // support.
- if (!isa<llvm::Function>(Addr))
- return;
- llvm::Module &M = CGM.getModule();
- llvm::LLVMContext &Ctx = CGM.getLLVMContext();
-
- // Get "nvvm.annotations" metadata node
- llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
-
- llvm::Metadata *MDVals[] = {
- llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
- llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
- // Append metadata to nvvm.annotations
- MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
-}
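// The appended metadata marks the outlined function as a kernel for the
// NVPTX backend, e.g. (function name illustrative, signature abbreviated;
// typed-pointer IR syntax of this LLVM version):
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void (...)* @__omp_offloading_XY_main_l10, !"kernel", i32 1}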
-
-void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
- const OMPExecutableDirective &D, StringRef ParentName,
- llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
- bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
- if (!IsOffloadEntry) // Nothing to do.
- return;
-
- assert(!ParentName.empty() && "Invalid target region parent name!");
-
- bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
- if (Mode)
- emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
- CodeGen);
- else
- emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
- CodeGen);
-
- setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
-}
-
-namespace {
-LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
-/// Enum for accessing the reserved_2 field of the ident_t struct.
-enum ModeFlagsTy : unsigned {
- /// Bit set to 1 when in SPMD mode.
- KMP_IDENT_SPMD_MODE = 0x01,
- /// Bit set to 1 when a simplified runtime is used.
- KMP_IDENT_SIMPLE_RT_MODE = 0x02,
- LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
-};
-
-/// Special Undefined mode: the combination of non-SPMD mode and the simple
-/// runtime.
-static const ModeFlagsTy UndefinedMode =
- (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
-} // anonymous namespace
-
-unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
- switch (getExecutionMode()) {
- case EM_SPMD:
- if (requiresFullRuntime())
- return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
- return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
- case EM_NonSPMD:
- assert(requiresFullRuntime() && "Expected full runtime.");
- return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
- case EM_Unknown:
- return UndefinedMode;
- }
- llvm_unreachable("Unknown flags are requested.");
-}
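// The reserved_2 encodings above, summarized (bit 0 = SPMD mode, bit 1 =
// simple/lightweight runtime):
//
//   EM_SPMD,    full runtime        -> 0x1 (KMP_IDENT_SPMD_MODE)
//   EM_SPMD,    lightweight runtime -> 0x3 (SPMD_MODE | SIMPLE_RT_MODE)
//   EM_NonSPMD, full runtime        -> 0x0
//   EM_Unknown                      -> 0x2 (UndefinedMode)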
-
-CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
- : CGOpenMPRuntime(CGM, "_", "$") {
- if (!CGM.getLangOpts().OpenMPIsDevice)
- llvm_unreachable("OpenMP NVPTX can only handle device code.");
-}
-
-void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
- ProcBindKind ProcBind,
- SourceLocation Loc) {
- // Do nothing in case of SPMD mode and L0 parallel.
- if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
- return;
-
- CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
+ llvm::Function *F = llvm::Intrinsic::getDeclaration(
+ &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x);
+ return Bld.CreateCall(F, llvm::None, "nvptx_tid");
}
-void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc) {
- // Do nothing in case of SPMD mode and L0 parallel.
- if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
- return;
-
- CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
-}
-
-void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
- const Expr *NumTeams,
- const Expr *ThreadLimit,
- SourceLocation Loc) {}
-
-llvm::Function *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
- // Emit target region as a standalone region.
- class NVPTXPrePostActionTy : public PrePostActionTy {
- bool &IsInParallelRegion;
- bool PrevIsInParallelRegion;
-
- public:
- NVPTXPrePostActionTy(bool &IsInParallelRegion)
- : IsInParallelRegion(IsInParallelRegion) {}
- void Enter(CodeGenFunction &CGF) override {
- PrevIsInParallelRegion = IsInParallelRegion;
- IsInParallelRegion = true;
- }
- void Exit(CodeGenFunction &CGF) override {
- IsInParallelRegion = PrevIsInParallelRegion;
- }
- } Action(IsInParallelRegion);
- CodeGen.setAction(Action);
- bool PrevIsInTTDRegion = IsInTTDRegion;
- IsInTTDRegion = false;
- bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
- IsInTargetMasterThreadRegion = false;
- auto *OutlinedFun =
- cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
- D, ThreadIDVar, InnermostKind, CodeGen));
- if (CGM.getLangOpts().Optimize) {
- OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
- OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
- OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
- }
- IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
- IsInTTDRegion = PrevIsInTTDRegion;
- if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
- !IsInParallelRegion) {
- llvm::Function *WrapperFun =
- createParallelDataSharingWrapper(OutlinedFun, D);
- WrapperFunctionsMap[OutlinedFun] = WrapperFun;
- }
-
- return OutlinedFun;
-}
-
-/// Get list of lastprivate variables from the teams distribute ... or
-/// teams {distribute ...} directives.
-static void
-getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
- llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
- assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
- "expected teams directive.");
- const OMPExecutableDirective *Dir = &D;
- if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
- if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
- Ctx,
- D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true))) {
- Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
- if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
- Dir = nullptr;
- }
- }
- if (!Dir)
- return;
- for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
- for (const Expr *E : C->getVarRefs())
- Vars.push_back(getPrivateItem(E));
- }
-}
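// Both spellings below reach the same lastprivate clause: the combined
// directive directly, and the nested one via the single-compound-child
// lookahead above (user code illustrative):
//
//   #pragma omp target teams distribute lastprivate(X)
//   for (int I = 0; I < N; ++I) { ... }
//
//   #pragma omp target teams          // nested: distribute is the only child
//   #pragma omp distribute lastprivate(X)
//   for (int I = 0; I < N; ++I) { ... }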
-
-/// Get list of reduction variables from the teams ... directives.
-static void
-getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
- llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
- assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
- "expected teams directive.");
- for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
- for (const Expr *E : C->privates())
- Vars.push_back(getPrivateItem(E));
- }
-}
-
-llvm::Function *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
- SourceLocation Loc = D.getBeginLoc();
-
- const RecordDecl *GlobalizedRD = nullptr;
- llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
- llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
- // Globalize team reduction variables unconditionally in all modes.
- if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
- getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
- if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
- getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
- if (!LastPrivatesReductions.empty()) {
- GlobalizedRD = ::buildRecordForGlobalizedVars(
- CGM.getContext(), llvm::None, LastPrivatesReductions,
- MappedDeclsFields, WarpSize);
- }
- } else if (!LastPrivatesReductions.empty()) {
- assert(!TeamAndReductions.first &&
- "Previous team declaration is not expected.");
- TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
- std::swap(TeamAndReductions.second, LastPrivatesReductions);
- }
-
- // Emit target region as a standalone region.
- class NVPTXPrePostActionTy : public PrePostActionTy {
- SourceLocation &Loc;
- const RecordDecl *GlobalizedRD;
- llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &MappedDeclsFields;
-
- public:
- NVPTXPrePostActionTy(
- SourceLocation &Loc, const RecordDecl *GlobalizedRD,
- llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &MappedDeclsFields)
- : Loc(Loc), GlobalizedRD(GlobalizedRD),
- MappedDeclsFields(MappedDeclsFields) {}
- void Enter(CodeGenFunction &CGF) override {
- auto &Rt =
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
- if (GlobalizedRD) {
- auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
- I->getSecond().GlobalRecord = GlobalizedRD;
- I->getSecond().MappedParams =
- std::make_unique<CodeGenFunction::OMPMapVars>();
- DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
- for (const auto &Pair : MappedDeclsFields) {
- assert(Pair.getFirst()->isCanonicalDecl() &&
- "Expected canonical declaration");
- Data.insert(std::make_pair(Pair.getFirst(),
- MappedVarData(Pair.getSecond(),
- /*IsOnePerTeam=*/true)));
- }
- }
- Rt.emitGenericVarsProlog(CGF, Loc);
- }
- void Exit(CodeGenFunction &CGF) override {
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitGenericVarsEpilog(CGF);
- }
- } Action(Loc, GlobalizedRD, MappedDeclsFields);
- CodeGen.setAction(Action);
- llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
- D, ThreadIDVar, InnermostKind, CodeGen);
- if (CGM.getLangOpts().Optimize) {
- OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
- OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
- OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
- }
-
- return OutlinedFun;
-}
-
-void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
- SourceLocation Loc,
- bool WithSPMDCheck) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
- getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
- return;
-
+llvm::Value *CGOpenMPRuntimeNVPTX::getGPUNumThreads(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
-
- const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
- if (I == FunctionGlobalizedDecls.end())
- return;
- if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
- QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
- QualType SecGlobalRecTy;
-
- // Recover pointer to this function's global record. The runtime will
- // handle the specifics of the allocation of the memory.
- // Use actual memory size of the record including the padding
- // for alignment purposes.
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
- unsigned GlobalRecordSize =
- CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
- GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
-
- llvm::PointerType *GlobalRecPtrTy =
- CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
- llvm::Value *GlobalRecCastAddr;
- llvm::Value *IsTTD = nullptr;
- if (!IsInTTDRegion &&
- (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
- llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
- llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
- if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *PL = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
- {RTLoc, ThreadID});
- IsTTD = Bld.CreateIsNull(PL);
- }
- llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
- Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(SPMDBB);
- Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
- CharUnits::fromQuantity(Alignment));
- CGF.EmitBranch(ExitBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(NonSPMDBB);
- llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
- if (const RecordDecl *SecGlobalizedVarsRecord =
- I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
- SecGlobalRecTy =
- CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
-
- // Recover pointer to this function's global record. The runtime will
- // handle the specifics of the allocation of the memory.
- // Use actual memory size of the record including the padding
- // for alignment purposes.
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
- unsigned GlobalRecordSize =
- CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
- GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
- Size = Bld.CreateSelect(
- IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
- }
- // TODO: allow the usage of shared memory to be controlled by
- // the user; for now, default to global.
- llvm::Value *GlobalRecordSizeArg[] = {
- Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
- llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
- GlobalRecordSizeArg);
- GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, GlobalRecPtrTy);
- CGF.EmitBlock(ExitBB);
- auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
- /*NumReservedValues=*/2, "_select_stack");
- Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
- Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
- GlobalRecCastAddr = Phi;
- I->getSecond().GlobalRecordAddr = Phi;
- I->getSecond().IsInSPMDModeFlag = IsSPMD;
- } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
- assert(GlobalizedRecords.back().Records.size() < 2 &&
- "Expected less than 2 globalized records: one for target and one "
- "for teams.");
- unsigned Offset = 0;
- for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
- QualType RDTy = CGM.getContext().getRecordType(RD);
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
- unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
- Offset =
- llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
- }
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
- Offset = llvm::alignTo(Offset, Alignment);
- GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
- ++GlobalizedRecords.back().RegionCounter;
- if (GlobalizedRecords.back().Records.size() == 1) {
- assert(KernelStaticGlobalized &&
- "Kernel static pointer must be initialized already.");
- auto *UseSharedMemory = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, nullptr,
- "_openmp_static_kernel$is_shared");
- UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
- /*DestWidth=*/16, /*Signed=*/0);
- llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
- Address(UseSharedMemory,
- CGM.getContext().getTypeAlignInChars(Int16Ty)),
- /*Volatile=*/false, Int16Ty, Loc);
- auto *StaticGlobalized = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
- llvm::GlobalValue::CommonLinkage, nullptr);
- auto *RecSize = new llvm::GlobalVariable(
- CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, nullptr,
- "_openmp_static_kernel$size");
- RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- llvm::Value *Ld = CGF.EmitLoadOfScalar(
- Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
- CGM.getContext().getSizeType(), Loc);
- llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- KernelStaticGlobalized, CGM.VoidPtrPtrTy);
- llvm::Value *GlobalRecordSizeArg[] = {
- llvm::ConstantInt::get(
- CGM.Int16Ty,
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
- StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
- CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_get_team_static_memory),
- GlobalRecordSizeArg);
- GlobalizedRecords.back().Buffer = StaticGlobalized;
- GlobalizedRecords.back().RecSize = RecSize;
- GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
- GlobalizedRecords.back().Loc = Loc;
- }
- assert(KernelStaticGlobalized && "Global address must be set already.");
- Address FrameAddr = CGF.EmitLoadOfPointer(
- Address(KernelStaticGlobalized, CGM.getPointerAlign()),
- CGM.getContext()
- .getPointerType(CGM.getContext().VoidPtrTy)
- .castAs<PointerType>());
- llvm::Value *GlobalRecValue =
- Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
- I->getSecond().GlobalRecordAddr = GlobalRecValue;
- I->getSecond().IsInSPMDModeFlag = nullptr;
- GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
- } else {
- // TODO: allow the usage of shared memory to be controlled by
- // the user; for now, default to global.
- bool UseSharedMemory =
- IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
- llvm::Value *GlobalRecordSizeArg[] = {
- llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
- CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
- llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- IsInTTDRegion
- ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
- : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
- GlobalRecordSizeArg);
- GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, GlobalRecPtrTy);
- I->getSecond().GlobalRecordAddr = GlobalRecValue;
- I->getSecond().IsInSPMDModeFlag = nullptr;
- }
- LValue Base =
- CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
-
- // Emit the "global alloca" which is a GEP from the global declaration
- // record using the pointer returned by the runtime.
- LValue SecBase;
- decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
- if (IsTTD) {
- SecIt = I->getSecond().SecondaryLocalVarData->begin();
- llvm::PointerType *SecGlobalRecPtrTy =
- CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
- SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
- SecGlobalRecTy);
- }
- for (auto &Rec : I->getSecond().LocalVarData) {
- bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
- llvm::Value *ParValue;
- if (EscapedParam) {
- const auto *VD = cast<VarDecl>(Rec.first);
- LValue ParLVal =
- CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
- ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
- }
- LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
- // Emit VarAddr based on the lane ID if required.
- QualType VarTy;
- if (Rec.second.IsOnePerTeam) {
- VarTy = Rec.second.FD->getType();
- } else {
- llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
- VarAddr.getAddress(CGF).getPointer(),
- {Bld.getInt32(0), getNVPTXLaneID(CGF)});
- VarTy =
- Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
- VarAddr = CGF.MakeAddrLValue(
- Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
- AlignmentSource::Decl);
- }
- Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
- if (!IsInTTDRegion &&
- (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
- assert(I->getSecond().IsInSPMDModeFlag &&
- "Expected unknown execution mode or required SPMD check.");
- if (IsTTD) {
- assert(SecIt->second.IsOnePerTeam &&
- "Secondary glob data must be one per team.");
- LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
- VarAddr.setAddress(
- Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
- VarAddr.getPointer(CGF)),
- VarAddr.getAlignment()));
- Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
- }
- Address GlobalPtr = Rec.second.PrivateAddr;
- Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
- Rec.second.PrivateAddr = Address(
- Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
- LocalAddr.getPointer(), GlobalPtr.getPointer()),
- LocalAddr.getAlignment());
- }
- if (EscapedParam) {
- const auto *VD = cast<VarDecl>(Rec.first);
- CGF.EmitStoreOfScalar(ParValue, VarAddr);
- I->getSecond().MappedParams->setVarAddr(CGF, VD,
- VarAddr.getAddress(CGF));
- }
- if (IsTTD)
- ++SecIt;
- }
- }
- for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
- // Recover pointer to this function's global record. The runtime will
- // handle the specifics of the allocation of the memory.
- // Use actual memory size of the record including the padding
- // for alignment purposes.
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Value *Size = CGF.getTypeSize(VD->getType());
- CharUnits Align = CGM.getContext().getDeclAlign(VD);
- Size = Bld.CreateNUWAdd(
- Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
- llvm::Value *AlignVal =
- llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
- Size = Bld.CreateUDiv(Size, AlignVal);
- Size = Bld.CreateNUWMul(Size, AlignVal);
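// Worked example of the rounding above (values illustrative): Size = 10,
// Align = 8 gives (10 + 7) / 8 * 8 = 16, i.e. the variable-length size is
// rounded up to the next multiple of its alignment.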
- // TODO: allow the usage of shared memory to be controlled by
- // the user; for now, default to global.
- llvm::Value *GlobalRecordSizeArg[] = {
- Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
- llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
- GlobalRecordSizeArg);
- llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
- LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
- CGM.getContext().getDeclAlign(VD),
- AlignmentSource::Decl);
- I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
- Base.getAddress(CGF));
- I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
- }
- I->getSecond().MappedParams->apply(CGF);
-}
-
-void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
- bool WithSPMDCheck) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
- getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
- return;
-
- const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
- if (I != FunctionGlobalizedDecls.end()) {
- I->getSecond().MappedParams->restore(CGF);
- if (!CGF.HaveInsertPoint())
- return;
- for (llvm::Value *Addr :
- llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
- Addr);
- }
- if (I->getSecond().GlobalRecordAddr) {
- if (!IsInTTDRegion &&
- (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
- llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
- Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(NonSPMDBB);
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
- CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
- CGF.EmitBlock(ExitBB);
- } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
- assert(GlobalizedRecords.back().RegionCounter > 0 &&
- "region counter must be > 0.");
- --GlobalizedRecords.back().RegionCounter;
- // Emit the restore function only in the target region.
- if (GlobalizedRecords.back().RegionCounter == 0) {
- QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
- /*DestWidth=*/16, /*Signed=*/0);
- llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
- Address(GlobalizedRecords.back().UseSharedMemory,
- CGM.getContext().getTypeAlignInChars(Int16Ty)),
- /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
- llvm::Value *Args[] = {
- llvm::ConstantInt::get(
- CGM.Int16Ty,
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
- IsInSharedMemory};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_restore_team_static_memory),
- Args);
- }
- } else {
- CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
- I->getSecond().GlobalRecordAddr);
- }
- }
- }
-}
-
-void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
- const OMPExecutableDirective &D,
- SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars) {
- if (!CGF.HaveInsertPoint())
- return;
-
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
- OutlinedFnArgs.push_back(ZeroAddr.getPointer());
- OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
- emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
-}
-
-void CGOpenMPRuntimeNVPTX::emitParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
- if (!CGF.HaveInsertPoint())
- return;
-
- if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
- emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
- else
- emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
-}
-
-void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
- llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
-
- // Force inline this outlined function at its call site.
- Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
-
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- // The thread ID for serialized parallel regions is 0.
- Address ThreadIDAddr = ZeroAddr;
- auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- Action.Enter(CGF);
-
- Address ZeroAddr =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".bound.zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
- OutlinedFnArgs.push_back(ZeroAddr.getPointer());
- OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
- emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
- };
- auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
- PrePostActionTy &) {
-
- RegionCodeGenTy RCG(CodeGen);
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *Args[] = {RTLoc, ThreadID};
-
- NVPTXActionTy Action(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
- Args,
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
- Args);
- RCG.setAction(Action);
- RCG(CGF);
- };
-
- auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Function *WFn = WrapperFunctionsMap[Fn];
- assert(WFn && "Wrapper function does not exist!");
- llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
-
- // Prepare for parallel region. Indicate the outlined function.
- llvm::Value *Args[] = {ID};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
- Args);
-
- // Create a private scope that will globalize the arguments
- // passed from the outside of the target region.
- CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
-
- // There's something to share.
- if (!CapturedVars.empty()) {
- // Allocate the list of references used to share the captured variables
- // with the workers.
- Address SharedArgs =
- CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
- llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
-
- llvm::Value *DataSharingArgs[] = {
- SharedArgsPtr,
- llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
- CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_begin_sharing_variables),
- DataSharingArgs);
-
- // Store variable address in a list of references to pass to workers.
- unsigned Idx = 0;
- ASTContext &Ctx = CGF.getContext();
- Address SharedArgListAddress = CGF.EmitLoadOfPointer(
- SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
- .castAs<PointerType>());
- for (llvm::Value *V : CapturedVars) {
- Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
- llvm::Value *PtrV;
- if (V->getType()->isIntegerTy())
- PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
- else
- PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
- CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
- Ctx.getPointerType(Ctx.VoidPtrTy));
- ++Idx;
- }
- }
-
- // Activate workers. This barrier is used by the master to signal
- // work for the workers.
- syncCTAThreads(CGF);
-
- // OpenMP [2.5, Parallel Construct, p.49]
- // There is an implied barrier at the end of a parallel region. After the
- // end of a parallel region, only the master thread of the team resumes
- // execution of the enclosing task region.
- //
- // The master waits at this barrier until all workers are done.
- syncCTAThreads(CGF);
-
- if (!CapturedVars.empty())
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));
-
- // Remember for post-processing in worker loop.
- Work.emplace_back(WFn);
- };
-
- auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- if (IsInParallelRegion) {
- SeqGen(CGF, Action);
- } else if (IsInTargetMasterThreadRegion) {
- L0ParallelGen(CGF, Action);
- } else {
- // Check for master and then parallelism:
- // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
- // Serialized execution.
- // } else {
- // Worker call.
- // }
- CGBuilderTy &Bld = CGF.Builder;
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
- llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
- llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
- llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
- llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
- Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ParallelCheckBB);
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *PL = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
- {RTLoc, ThreadID});
- llvm::Value *Res = Bld.CreateIsNotNull(PL);
- Bld.CreateCondBr(Res, SeqBB, MasterBB);
- CGF.EmitBlock(SeqBB);
- SeqGen(CGF, Action);
- CGF.EmitBranch(ExitBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(MasterBB);
- L0ParallelGen(CGF, Action);
- CGF.EmitBranch(ExitBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- // Emit the continuation block for code after the if.
- CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
- }
- };
-
- if (IfCond) {
- emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
- } else {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- RegionCodeGenTy ThenRCG(LNParallelGen);
- ThenRCG(CGF);
- }
-}
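-
- // A minimal CUDA-level sketch of the level-0 handshake that L0ParallelGen
- // emits above (g_work_fn/g_shared_args/master_side are hypothetical names,
- // not the runtime API): the master publishes the wrapper function and the
- // shared-argument list, then two block-wide barriers release the workers
- // and wait for them.
- //
- //   __device__ void *g_work_fn;       // kmpc_kernel_prepare_parallel
- //   __device__ void **g_shared_args;  // kmpc_begin_sharing_variables
- //
- //   __device__ void master_side(void *wfn, void **args) {
- //     g_work_fn = wfn;
- //     g_shared_args = args;
- //     __syncthreads();                // syncCTAThreads: activate workers
- //     __syncthreads();                // syncCTAThreads: wait for workers
- //   }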
-
-void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
- // Just call the outlined function to execute the parallel region.
- // OutlinedFn(&GTid, &zero, CapturedStruct);
- //
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
-
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- // ThreadId for serialized parallels is 0.
- Address ThreadIDAddr = ZeroAddr;
- auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- Action.Enter(CGF);
-
- Address ZeroAddr =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".bound.zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
- OutlinedFnArgs.push_back(ZeroAddr.getPointer());
- OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
- emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
- };
- auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
- PrePostActionTy &) {
- RegionCodeGenTy RCG(CodeGen);
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *Args[] = {RTLoc, ThreadID};
-
- NVPTXActionTy Action(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
- Args,
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
- Args);
- RCG.setAction(Action);
- RCG(CGF);
- };
-
- if (IsInTargetMasterThreadRegion) {
- // In the worker we need to use the real thread id.
- ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
- RegionCodeGenTy RCG(CodeGen);
- RCG(CGF);
- } else {
- // If we are not in the target region, it is definitely L2 parallelism or
- // more, because in SPMD mode we always have an L1 parallel level, so we
- // don't need to check for orphaned directives.
- RegionCodeGenTy RCG(SeqGen);
- RCG(CGF);
- }
-}
-
-void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
- // Always emit simple barriers!
- if (!CGF.HaveInsertPoint())
- return;
- // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
- // This function does not use parameters, so we can emit just default values.
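- // (On the NVPTX device runtime this is effectively a block-wide barrier,
- // i.e. the moral equivalent of CUDA's __syncthreads(); marking the call
- // convergent below keeps the optimizer from moving threads across it.)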
- llvm::Value *Args[] = {
- llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(getIdentTyPointerTy())),
- llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
- llvm::CallInst *Call = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
- Call->setConvergent();
-}
-
-void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
- SourceLocation Loc,
- OpenMPDirectiveKind Kind, bool,
- bool) {
- // Always emit simple barriers!
- if (!CGF.HaveInsertPoint())
- return;
- // Build call __kmpc_barrier(loc, thread_id);
- unsigned Flags = getDefaultFlagsForBarriers(Kind);
- llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
- getThreadID(CGF, Loc)};
- llvm::CallInst *Call = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
- Call->setConvergent();
-}
-
-void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
- CodeGenFunction &CGF, StringRef CriticalName,
- const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
- const Expr *Hint) {
- llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
- llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
- llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
- llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
-
- // Get the mask of active threads in the warp.
- llvm::Value *Mask = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_warp_active_thread_mask));
- // Fetch team-local id of the thread.
- llvm::Value *ThreadID = getNVPTXThreadID(CGF);
-
- // Get the width of the team.
- llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);
-
- // Initialize the counter variable for the loop.
- QualType Int32Ty =
- CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
- Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
- LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
- CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
- /*isInit=*/true);
-
- // Block checks whether the loop counter exceeds the upper bound.
- CGF.EmitBlock(LoopBB);
- llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
- llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
- CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
-
- // Block tests which single thread should execute the region, and which
- // threads should go straight to the synchronisation point.
- CGF.EmitBlock(TestBB);
- CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
- llvm::Value *CmpThreadToCounter =
- CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
- CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
-
- // Block emits the body of the critical region.
- CGF.EmitBlock(BodyBB);
-
- // Output the critical statement.
- CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
- Hint);
-
- // After executing the body of the critical region, the single executing
- // thread jumps to the synchronisation point.
- // The block waits for all threads in the current team to finish, then
- // increments the counter variable and returns to the loop.
- CGF.EmitBlock(SyncBB);
- // Reconverge active threads in the warp.
- (void)CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_syncwarp), Mask);
-
- llvm::Value *IncCounterVal =
- CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
- CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
- CGF.EmitBranch(LoopBB);
-
- // Block that is reached when all threads in the team complete the region.
- CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
-}
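-
- // A CUDA-level sketch of the serialization scheme above (hypothetical
- // names; the body itself is still guarded by the usual kmpc_critical lock
- // via the base-class call): every thread walks the counter loop, exactly
- // one thread per trip may run the body, and the warp reconverges on its
- // recorded mask after every trip so divergent lanes cannot deadlock.
- //
- //   __device__ void critical_sketch(int *state) {
- //     unsigned mask = __activemask();       // warp_active_thread_mask
- //     int tid = threadIdx.x;                // team-local thread id
- //     for (int counter = 0; counter < blockDim.x; ++counter) {
- //       if (tid == counter)
- //         *state += 1;                      // critical body (lock-guarded
- //                                           // in the real emission)
- //       __syncwarp(mask);                   // kmpc_syncwarp
- //     }
- //   }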
-
-/// Cast value to the specified type.
-static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
- QualType ValTy, QualType CastTy,
- SourceLocation Loc) {
- assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
- "Cast type must sized.");
- assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
- "Val type must sized.");
- llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
- if (ValTy == CastTy)
- return Val;
- if (CGF.getContext().getTypeSizeInChars(ValTy) ==
- CGF.getContext().getTypeSizeInChars(CastTy))
- return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
- if (CastTy->isIntegerType() && ValTy->isIntegerType())
- return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
- CastTy->hasSignedIntegerRepresentation());
- Address CastItem = CGF.CreateMemTemp(CastTy);
- Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
- CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy);
- return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc);
-}
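-
- // For the fall-through case above (sizes differ, types not both integers),
- // the emitted IR amounts to a bit-preserving cast through a temporary; a
- // rough stand-alone sketch of the idea (cast_via_temp is hypothetical):
- //
- //   template <typename To, typename From>
- //   __device__ To cast_via_temp(From v) {          // the CastItem pattern
- //     To tmp{};
- //     memcpy(&tmp, &v, sizeof(From) < sizeof(To) ? sizeof(From)
- //                                                : sizeof(To));
- //     return tmp;
- //   }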
-
-/// This function creates calls to one of two shuffle functions to copy
-/// variables between lanes in a warp.
-static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
- llvm::Value *Elem,
- QualType ElemType,
- llvm::Value *Offset,
- SourceLocation Loc) {
- CodeGenModule &CGM = CGF.CGM;
- CGBuilderTy &Bld = CGF.Builder;
- CGOpenMPRuntimeNVPTX &RT =
- *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));
-
- CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
- assert(Size.getQuantity() <= 8 &&
- "Unsupported bitwidth in shuffle instruction.");
-
- OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
- ? OMPRTL_NVPTX__kmpc_shuffle_int32
- : OMPRTL_NVPTX__kmpc_shuffle_int64;
-
- // Cast all types to 32- or 64-bit values before calling shuffle routines.
- QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
- Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
- llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
- llvm::Value *WarpSize =
- Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
-
- llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
- RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});
-
- return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
-}
-
-static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
- Address DestAddr, QualType ElemType,
- llvm::Value *Offset, SourceLocation Loc) {
- CGBuilderTy &Bld = CGF.Builder;
-
- CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
- // Create the loop over the big-sized data.
- // ptr = (void*)Elem;
- // ptrEnd = (void*)(Elem + 1);
- // Step = 8;
- // while (ptr + Step < ptrEnd)
- // shuffle((int64_t)*ptr);
- // Step = 4;
- // while (ptr + Step < ptrEnd)
- // shuffle((int32_t)*ptr);
- // ...
- Address ElemPtr = DestAddr;
- Address Ptr = SrcAddr;
- Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
- for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
- if (Size < CharUnits::fromQuantity(IntSize))
- continue;
- QualType IntType = CGF.getContext().getIntTypeForBitwidth(
- CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
- /*Signed=*/1);
- llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
- Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
- ElemPtr =
- Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
- if (Size.getQuantity() / IntSize > 1) {
- llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
- llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
- CGF.EmitBlock(PreCondBB);
- llvm::PHINode *PhiSrc =
- Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
- PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
- llvm::PHINode *PhiDest =
- Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
- PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
- Ptr = Address(PhiSrc, Ptr.getAlignment());
- ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
- llvm::Value *PtrDiff = Bld.CreatePtrDiff(
- PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
- Ptr.getPointer(), CGF.VoidPtrTy));
- Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
- ThenBB, ExitBB);
- CGF.EmitBlock(ThenBB);
- llvm::Value *Res = createRuntimeShuffleFunction(
- CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
- IntType, Offset, Loc);
- CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
- Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
- Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
- PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
- PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
- CGF.EmitBranch(PreCondBB);
- CGF.EmitBlock(ExitBB);
- } else {
- llvm::Value *Res = createRuntimeShuffleFunction(
- CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
- IntType, Offset, Loc);
- CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
- Ptr = Bld.CreateConstGEP(Ptr, 1);
- ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
- }
- Size = Size % IntSize;
- }
-}
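-
- // Ignoring the ptr-diff bookkeeping, the loop above behaves like this CUDA
- // sketch (assuming the runtime shuffles map onto warp shuffle intrinsics):
- //
- //   __device__ void shuffle_bytes(char *dst, const char *src, size_t size,
- //                                 int delta) {
- //     size_t off = 0;
- //     while (size - off >= 8) {                       // 64-bit chunks
- //       long long v; memcpy(&v, src + off, 8);
- //       v = __shfl_down_sync(0xffffffffu, v, delta);
- //       memcpy(dst + off, &v, 8); off += 8;
- //     }
- //     while (size - off >= 4) {                       // 32-bit chunks
- //       int v; memcpy(&v, src + off, 4);
- //       v = __shfl_down_sync(0xffffffffu, v, delta);
- //       memcpy(dst + off, &v, 4); off += 4;
- //     }
- //     // 2- and 1-byte tails get widened to 32 bits by castValueToType.
- //   }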
-
-namespace {
-enum CopyAction : unsigned {
- // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
- // the warp using shuffle instructions.
- RemoteLaneToThread,
- // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
- ThreadCopy,
- // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
- ThreadToScratchpad,
- // ScratchpadToThread: Copy from a scratchpad array in global memory
- // containing team-reduced data to a thread's stack.
- ScratchpadToThread,
-};
-} // namespace
-
-struct CopyOptionsTy {
- llvm::Value *RemoteLaneOffset;
- llvm::Value *ScratchpadIndex;
- llvm::Value *ScratchpadWidth;
-};
-
-/// Emit instructions to copy a Reduce list, which contains partially
-/// aggregated values, in the specified direction.
-static void emitReductionListCopy(
- CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
- ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
- CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
-
- CodeGenModule &CGM = CGF.CGM;
- ASTContext &C = CGM.getContext();
- CGBuilderTy &Bld = CGF.Builder;
-
- llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
- llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
- llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
-
- // Iterate, element by element, through the source Reduce list and
- // make a copy.
- unsigned Idx = 0;
- unsigned Size = Privates.size();
- for (const Expr *Private : Privates) {
- Address SrcElementAddr = Address::invalid();
- Address DestElementAddr = Address::invalid();
- Address DestElementPtrAddr = Address::invalid();
- // Should we shuffle in an element from a remote lane?
- bool ShuffleInElement = false;
- // Set to true to update the pointer in the dest Reduce list to a
- // newly created element.
- bool UpdateDestListPtr = false;
- // Increment the src or dest pointer to the scratchpad, for each
- // new element.
- bool IncrScratchpadSrc = false;
- bool IncrScratchpadDest = false;
-
- switch (Action) {
- case RemoteLaneToThread: {
- // Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
-
- // Step 1.2: Create a temporary to store the element in the destination
- // Reduce list.
- DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr =
- CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
- ShuffleInElement = true;
- UpdateDestListPtr = true;
- break;
- }
- case ThreadCopy: {
- // Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
-
- // Step 1.2: Get the address for dest element. The destination
- // element has already been created on the thread's stack.
- DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr = CGF.EmitLoadOfPointer(
- DestElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
- break;
- }
- case ThreadToScratchpad: {
- // Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
-
- // Step 1.2: Get the address for dest element:
- // address = base + index * ElementSizeInChars.
- llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
- llvm::Value *CurrentOffset =
- Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
- llvm::Value *ScratchPadElemAbsolutePtrVal =
- Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
- ScratchPadElemAbsolutePtrVal =
- Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
- DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
- C.getTypeAlignInChars(Private->getType()));
- IncrScratchpadDest = true;
- break;
- }
- case ScratchpadToThread: {
- // Step 1.1: Get the address for the src element in the scratchpad.
- // address = base + index * ElementSizeInChars.
- llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
- llvm::Value *CurrentOffset =
- Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
- llvm::Value *ScratchPadElemAbsolutePtrVal =
- Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
- ScratchPadElemAbsolutePtrVal =
- Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
- SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
- C.getTypeAlignInChars(Private->getType()));
- IncrScratchpadSrc = true;
-
- // Step 1.2: Create a temporary to store the element in the destination
- // Reduce list.
- DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr =
- CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
- UpdateDestListPtr = true;
- break;
- }
- }
-
- // Regardless of the copy direction, we emit the load of the src
- // element, as it is required in all cases.
- SrcElementAddr = Bld.CreateElementBitCast(
- SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
- DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
- SrcElementAddr.getElementType());
-
- // Now that all active lanes have read the element in the
- // Reduce list, shuffle over the value from the remote lane.
- if (ShuffleInElement) {
- shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
- RemoteLaneOffset, Private->getExprLoc());
- } else {
- switch (CGF.getEvaluationKind(Private->getType())) {
- case TEK_Scalar: {
- llvm::Value *Elem =
- CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
- Private->getType(), Private->getExprLoc());
- // Store the source element value to the dest element address.
- CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
- Private->getType());
- break;
- }
- case TEK_Complex: {
- CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
- CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
- Private->getExprLoc());
- CGF.EmitStoreOfComplex(
- Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
- /*isInit=*/false);
- break;
- }
- case TEK_Aggregate:
- CGF.EmitAggregateCopy(
- CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
- CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
- break;
- }
- }
-
- // Step 3.1: Modify the reference in the dest Reduce list as needed, i.e.
- // point it at the newly created element. The element is live in the
- // current function scope and in that of the functions it invokes (i.e.,
- // reduce_function).
- // RemoteReduceData[i] = (void*)&RemoteElem
- if (UpdateDestListPtr) {
- CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
- DestElementAddr.getPointer(), CGF.VoidPtrTy),
- DestElementPtrAddr, /*Volatile=*/false,
- C.VoidPtrTy);
- }
-
- // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
- // address of the next element in scratchpad memory, unless we're currently
- // processing the last one. Memory alignment is also taken care of here.
- if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
- llvm::Value *ScratchpadBasePtr =
- IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
- llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
- ScratchpadBasePtr = Bld.CreateNUWAdd(
- ScratchpadBasePtr,
- Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
-
- // Take care of global memory alignment for performance
- ScratchpadBasePtr = Bld.CreateNUWSub(
- ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
- ScratchpadBasePtr = Bld.CreateUDiv(
- ScratchpadBasePtr,
- llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
- ScratchpadBasePtr = Bld.CreateNUWAdd(
- ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
- ScratchpadBasePtr = Bld.CreateNUWMul(
- ScratchpadBasePtr,
- llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
-
- if (IncrScratchpadDest)
- DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
- else /* IncrScratchpadSrc = true */
- SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
- }
-
- ++Idx;
- }
-}
-
-/// This function emits a helper that gathers Reduce lists from the first
-/// lane of every active warp to lanes in the first warp.
-///
-/// void inter_warp_copy_func(void* reduce_data, num_warps)
-/// shared smem[warp_size];
-/// For all data entries D in reduce_data:
-/// sync
-/// If (I am the first lane in each warp)
-/// Copy my local D to smem[warp_id]
-/// sync
-/// if (I am the first warp)
-/// Copy smem[thread_id] to my local D
-static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
- ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy,
- SourceLocation Loc) {
- ASTContext &C = CGM.getContext();
- llvm::Module &M = CGM.getModule();
-
- // ReduceList: thread local Reduce list.
- // At the stage of the computation when this function is called, partially
- // aggregated values reside in the first lane of every active warp.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // NumWarps: number of warps active in the parallel region. This could
- // be smaller than 32 (max warps in a CTA) for partial block reduction.
- ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.getIntTypeForBitwidth(32, /* Signed */ true),
- ImplicitParamDecl::Other);
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&NumWarpsArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
- llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_inter_warp_copy_func", &M);
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- // This array is used as a medium to transfer, one reduce element at a time,
- // the data from the first lane of every warp to lanes in the first warp
- // in order to perform the final step of a reduction in a parallel region
- // (reduction across warps). The array is placed in NVPTX __shared__ memory
- // for reduced latency, as well as to have a distinct copy for concurrently
- // executing target regions. The array is declared with common linkage so
- // as to be shared across compilation units.
- StringRef TransferMediumName =
- "__openmp_nvptx_data_transfer_temporary_storage";
- llvm::GlobalVariable *TransferMedium =
- M.getGlobalVariable(TransferMediumName);
- if (!TransferMedium) {
- auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
- unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
- TransferMedium = new llvm::GlobalVariable(
- M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
- llvm::Constant::getNullValue(Ty), TransferMediumName,
- /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
- SharedAddressSpace);
- CGM.addCompilerUsedGlobal(TransferMedium);
- }
-
- // Get the CUDA thread id of the current OpenMP thread on the GPU.
- llvm::Value *ThreadID = getNVPTXThreadID(CGF);
- // nvptx_lane_id = nvptx_id % warpsize
- llvm::Value *LaneID = getNVPTXLaneID(CGF);
- // nvptx_warp_id = nvptx_id / warpsize
- llvm::Value *WarpID = getNVPTXWarpID(CGF);
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
-
- unsigned Idx = 0;
- for (const Expr *Private : Privates) {
- //
- // Warp master copies reduce element to transfer medium in __shared__
- // memory.
- //
- unsigned RealTySize =
- C.getTypeSizeInChars(Private->getType())
- .alignTo(C.getTypeAlignInChars(Private->getType()))
- .getQuantity();
- for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
- unsigned NumIters = RealTySize / TySize;
- if (NumIters == 0)
- continue;
- QualType CType = C.getIntTypeForBitwidth(
- C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
- llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
- CharUnits Align = CharUnits::fromQuantity(TySize);
- llvm::Value *Cnt = nullptr;
- Address CntAddr = Address::invalid();
- llvm::BasicBlock *PrecondBB = nullptr;
- llvm::BasicBlock *ExitBB = nullptr;
- if (NumIters > 1) {
- CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
- CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
- /*Volatile=*/false, C.IntTy);
- PrecondBB = CGF.createBasicBlock("precond");
- ExitBB = CGF.createBasicBlock("exit");
- llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(PrecondBB);
- Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
- llvm::Value *Cmp =
- Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
- Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
- CGF.EmitBlock(BodyBB);
- }
- // kmpc_barrier.
- CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
- /*EmitChecks=*/false,
- /*ForceSimpleCall=*/true);
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
-
- // if (lane_id == 0)
- llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
- Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
- CGF.EmitBlock(ThenBB);
-
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = ((CopyType*)(elemptrptr)) + I
- Address ElemPtr = Address(ElemPtrPtr, Align);
- ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
- if (NumIters > 1) {
- ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
- ElemPtr.getAlignment());
- }
-
- // Get pointer to location in transfer medium.
- // MediumPtr = &medium[warp_id]
- llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
- Address MediumPtr(MediumPtrVal, Align);
- // Casting to actual data type.
- // MediumPtr = (CopyType*)MediumPtrAddr;
- MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
-
- // elem = *elemptr
- // *MediumPtr = elem
- llvm::Value *Elem =
- CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false, CType, Loc);
- // Store the source element value to the dest element address.
- CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType);
-
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(MergeBB);
-
- // kmpc_barrier.
- CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
- /*EmitChecks=*/false,
- /*ForceSimpleCall=*/true);
-
- //
- // Warp 0 copies reduce element from transfer medium.
- //
- llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
-
- Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
- llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
- AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
-
- // Up to 32 threads in warp 0 are active.
- llvm::Value *IsActiveThread =
- Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
- Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
-
- CGF.EmitBlock(W0ThenBB);
-
- // SrcMediumPtr = &medium[tid]
- llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium,
- {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
- Address SrcMediumPtr(SrcMediumPtrVal, Align);
- // SrcMediumVal = *SrcMediumPtr;
- SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
-
- // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
- Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
- TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
- Address TargetElemPtr = Address(TargetElemPtrVal, Align);
- TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
- if (NumIters > 1) {
- TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
- TargetElemPtr.getAlignment());
- }
-
- // *TargetElemPtr = SrcMediumVal;
- llvm::Value *SrcMediumValue =
- CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
- CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
- CType);
- Bld.CreateBr(W0MergeBB);
-
- CGF.EmitBlock(W0ElseBB);
- Bld.CreateBr(W0MergeBB);
-
- CGF.EmitBlock(W0MergeBB);
-
- if (NumIters > 1) {
- Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
- CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
- CGF.EmitBranch(PrecondBB);
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ExitBB);
- }
- RealTySize %= TySize;
- }
- ++Idx;
- }
-
- CGF.FinishFunction();
- return Fn;
-}
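-
- // For a single 32-bit element the generated helper corresponds to this
- // CUDA sketch (medium[] plays the role of the __shared__ transfer array):
- //
- //   __device__ int inter_warp_gather(int elem, int num_warps) {
- //     __shared__ int medium[32];           // one slot per warp
- //     int tid = threadIdx.x;
- //     int lane = tid & 31, warp = tid >> 5;
- //     __syncthreads();                     // kmpc_barrier
- //     if (lane == 0)
- //       medium[warp] = elem;               // warp master publishes
- //     __syncthreads();                     // kmpc_barrier
- //     if (tid < num_warps)
- //       elem = medium[tid];                // warp 0 collects
- //     return elem;
- //   }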
-
-/// Emit a helper that reduces data across two OpenMP threads (lanes)
-/// in the same warp. It uses shuffle instructions to copy over data from
-/// a remote lane's stack. The reduction algorithm performed is specified
-/// by the fourth parameter.
-///
-/// Algorithm Versions.
-/// Full Warp Reduce (argument value 0):
-/// This algorithm assumes that all 32 lanes are active and gathers
-/// data from these 32 lanes, producing a single resultant value.
-/// Contiguous Partial Warp Reduce (argument value 1):
-/// This algorithm assumes that only a *contiguous* subset of lanes
-/// are active. This happens for the last warp in a parallel region
- /// when the user-specified num_threads is not an integer multiple of
-/// 32. This contiguous subset always starts with the zeroth lane.
-/// Partial Warp Reduce (argument value 2):
-/// This algorithm gathers data from any number of lanes at any position.
-/// All reduced values are stored in the lowest possible lane. The set
-/// of problems every algorithm addresses is a super set of those
-/// addressable by algorithms with a lower version number. Overhead
-/// increases as algorithm version increases.
-///
-/// Terminology
-/// Reduce element:
-/// Reduce element refers to the individual data field with primitive
-/// data types to be combined and reduced across threads.
-/// Reduce list:
-/// Reduce list refers to a collection of local, thread-private
-/// reduce elements.
-/// Remote Reduce list:
-/// Remote Reduce list refers to a collection of remote (relative to
-/// the current thread) reduce elements.
-///
-/// We distinguish between three states of threads that are important to
-/// the implementation of this function.
-/// Alive threads:
-/// Threads in a warp executing the SIMT instruction, as distinguished from
-/// threads that are inactive due to divergent control flow.
-/// Active threads:
-/// The minimal set of threads that has to be alive upon entry to this
-/// function. The computation is correct iff active threads are alive.
-/// Some threads are alive but they are not active because they do not
-/// contribute to the computation in any useful manner. Turning them off
-/// may introduce control flow overheads without any tangible benefits.
-/// Effective threads:
-/// In order to comply with the argument requirements of the shuffle
-/// function, we must keep all lanes holding data alive. But at most
-/// half of them perform value aggregation; we refer to this half of
-/// threads as effective. The other half is simply handing off their
-/// data.
-///
-/// Procedure
-/// Value shuffle:
-/// In this step active threads transfer data from higher lane positions
- /// in the warp to lower lane positions, creating the Remote Reduce list.
-/// Value aggregation:
-/// In this step, effective threads combine their thread local Reduce list
-/// with Remote Reduce list and store the result in the thread local
-/// Reduce list.
-/// Value copy:
-/// In this step, we deal with the assumption made by algorithm 2
-/// (i.e. contiguity assumption). When we have an odd number of lanes
-/// active, say 2k+1, only k threads will be effective and therefore k
-/// new values will be produced. However, the Reduce list owned by the
-/// (2k+1)th thread is ignored in the value aggregation. Therefore
-/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
-/// that the contiguity assumption still holds.
-static llvm::Function *emitShuffleAndReduceFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
- ASTContext &C = CGM.getContext();
-
- // Thread local Reduce list used to host the values of data to be reduced.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Current lane id; could be logical.
- ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
- ImplicitParamDecl::Other);
- // Offset of the remote source lane relative to the current lane.
- ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.ShortTy, ImplicitParamDecl::Other);
- // Algorithm version. This is expected to be known at compile time.
- ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.ShortTy, ImplicitParamDecl::Other);
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&LaneIDArg);
- Args.push_back(&RemoteLaneOffsetArg);
- Args.push_back(&AlgoVerArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- if (CGM.getLangOpts().Optimize) {
- Fn->removeFnAttr(llvm::Attribute::NoInline);
- Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
- Fn->addFnAttr(llvm::Attribute::AlwaysInline);
- }
-
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, SourceLocation()),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
-
- Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
- llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
- AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
-
- Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
- llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
- AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
-
- Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
- llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
- AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
-
- // Create a local thread-private variable to host the Reduce list
- // from a remote lane.
- Address RemoteReduceList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
-
- // This loop iterates through the list of reduce elements and copies,
- // element by element, from a remote lane in the warp to RemoteReduceList,
- // hosted on the thread's stack.
- emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
- LocalReduceList, RemoteReduceList,
- {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
- /*ScratchpadIndex=*/nullptr,
- /*ScratchpadWidth=*/nullptr});
-
- // The actions to be performed on the Remote Reduce list depend
- // on the algorithm version.
- //
- // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
- // LaneId % 2 == 0 && Offset > 0):
- // do the reduction value aggregation
- //
- // The thread local variable Reduce list is mutated in place to host the
- // reduced data, which is the aggregated value produced from local and
- // remote lanes.
- //
- // Note that AlgoVer is expected to be a constant integer known at compile
- // time.
- // When AlgoVer==0, the first conjunction evaluates to true, making
- // the entire predicate true at compile time.
- // When AlgoVer==1, only the second part of the second conjunction needs
- // to be evaluated at runtime; the other conjunctions evaluate to false
- // at compile time.
- // When AlgoVer==2, only the second part of the third conjunction needs
- // to be evaluated at runtime; the other conjunctions evaluate to false
- // at compile time.
- llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
-
- llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
- llvm::Value *CondAlgo1 = Bld.CreateAnd(
- Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
-
- llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
- llvm::Value *CondAlgo2 = Bld.CreateAnd(
- Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
- CondAlgo2 = Bld.CreateAnd(
- CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
-
- llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
- CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
-
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
- Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
-
- CGF.EmitBlock(ThenBB);
- // reduce_function(LocalReduceList, RemoteReduceList)
- llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- LocalReduceList.getPointer(), CGF.VoidPtrTy);
- llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- RemoteReduceList.getPointer(), CGF.VoidPtrTy);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(MergeBB);
-
- // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
- // Reduce list.
- Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
- llvm::Value *CondCopy = Bld.CreateAnd(
- Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
-
- llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
- Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
-
- CGF.EmitBlock(CpyThenBB);
- emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
- RemoteReduceList, LocalReduceList);
- Bld.CreateBr(CpyMergeBB);
-
- CGF.EmitBlock(CpyElseBB);
- Bld.CreateBr(CpyMergeBB);
-
- CGF.EmitBlock(CpyMergeBB);
-
- CGF.FinishFunction();
- return Fn;
-}
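-
- // With algorithm version 0 and halving offsets, the helper degenerates to
- // the classic full-warp tree reduction; assuming the reduce operation is
- // '+', a CUDA sketch of what the caller effectively gets:
- //
- //   __device__ int full_warp_reduce_sum(int val) {
- //     for (int offset = 16; offset > 0; offset >>= 1)
- //       val += __shfl_down_sync(0xffffffffu, val, offset);
- //     return val;                          // lane 0 holds the warp sum
- //   }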
-
-/// This function emits a helper that copies all the reduction variables from
-/// the team into the provided global buffer for the reduction variables.
-///
-/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
-/// For all data entries D in reduce_data:
-/// Copy local D to buffer.D[Idx]
-static llvm::Value *emitListToGlobalCopyFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (const Expr *Private : Privates) {
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = ((CopyType*)(elemptrptr)) + I
- ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
- Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
- const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
- // Global = Buffer.VD[Idx];
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
- switch (CGF.getEvaluationKind(Private->getType())) {
- case TEK_Scalar: {
- llvm::Value *V = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
- Private->getType(), Loc);
- CGF.EmitStoreOfScalar(V, GlobLVal);
- break;
- }
- case TEK_Complex: {
- CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
- CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
- CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
- break;
- }
- case TEK_Aggregate:
- CGF.EmitAggregateCopy(GlobLVal,
- CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
- break;
- }
- ++Idx;
- }
-
- CGF.FinishFunction();
- return Fn;
-}
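-
- // Concretely, for a team-reduction record with two hypothetical fields D0
- // and D1 (NUM_TEAMS stands in for the buffer length), the emitted helper
- // behaves like:
- //
- //   struct Buffer { int D0[NUM_TEAMS]; double D1[NUM_TEAMS]; };
- //
- //   __device__ void list_to_global_copy(struct Buffer *buf, int idx,
- //                                       void **reduce_data) {
- //     buf->D0[idx] = *(int *)reduce_data[0];
- //     buf->D1[idx] = *(double *)reduce_data[1];
- //   }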
-
-/// This function emits a helper that reduces all the reduction variables from
-/// the team into the provided global buffer for the reduction variables.
-///
-/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
-/// void *GlobPtrs[];
-/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
-/// ...
-/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
-/// reduce_function(GlobPtrs, reduce_data);
-static llvm::Value *emitListToGlobalReduceFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap,
- llvm::Function *ReduceFn) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
-
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- // Global = Buffer.VD[Idx];
- const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
- llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
- CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- // Call reduce_function(GlobalReduceList, ReduceList)
- llvm::Value *GlobalReduceList =
- CGF.EmitCastToVoidPtr(ReductionList.getPointer());
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
- AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
- CGF.FinishFunction();
- return Fn;
-}
-
- /// This function emits a helper that copies all the reduction variables from
- /// the provided global buffer back into the thread-local reduce list.
- ///
- /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
- /// For all data entries D in reduce_data:
- /// Copy buffer.D[Idx] to local D;
-static llvm::Value *emitGlobalToListCopyFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
-
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (const Expr *Private : Privates) {
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = ((CopyType*)(elemptrptr)) + I
- ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
- Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
- const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
- // Global = Buffer.VD[Idx];
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
- switch (CGF.getEvaluationKind(Private->getType())) {
- case TEK_Scalar: {
- llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
- CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType());
- break;
- }
- case TEK_Complex: {
- CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
- CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- /*isInit=*/false);
- break;
- }
- case TEK_Aggregate:
- CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- GlobLVal, Private->getType(),
- AggValueSlot::DoesNotOverlap);
- break;
- }
- ++Idx;
- }
-
- CGF.FinishFunction();
- return Fn;
-}
-
- /// This function emits a helper that reduces the reduction variables in the
- /// provided global buffer into the thread-local reduce list.
-///
-/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
-/// void *GlobPtrs[];
-/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
-/// ...
-/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
-/// reduce_function(reduce_data, GlobPtrs);
-static llvm::Value *emitGlobalToListReduceFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap,
- llvm::Function *ReduceFn) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
-
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- // Global = Buffer.VD[Idx];
- const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
- llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
- CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- // Call reduce_function(ReduceList, GlobalReduceList)
- llvm::Value *GlobalReduceList =
- CGF.EmitCastToVoidPtr(ReductionList.getPointer());
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
- AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
- CGF.FinishFunction();
- return Fn;
-}
-
-///
-/// Design of OpenMP reductions on the GPU
-///
-/// Consider a typical OpenMP program with one or more reduction
-/// clauses:
-///
-/// float foo;
-/// double bar;
-/// #pragma omp target teams distribute parallel for \
-/// reduction(+:foo) reduction(*:bar)
-/// for (int i = 0; i < N; i++) {
-/// foo += A[i]; bar *= B[i];
-/// }
-///
-/// where 'foo' and 'bar' are reduced across all OpenMP threads in
-/// all teams. In our OpenMP implementation on the NVPTX device an
-/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
-/// within a team are mapped to CUDA threads within a threadblock.
-/// Our goal is to efficiently aggregate values across all OpenMP
-/// threads such that:
-///
-/// - the compiler and runtime are logically concise, and
-/// - the reduction is performed efficiently in a hierarchical
-/// manner as follows: within OpenMP threads in the same warp,
-/// across warps in a threadblock, and finally across teams on
-/// the NVPTX device.
-///
-/// Introduction to Decoupling
-///
-/// We would like to decouple the compiler and the runtime so that the
-/// latter is ignorant of the reduction variables (number, data types)
-/// and the reduction operators. This allows a simpler interface
-/// and implementation while still attaining good performance.
-///
-/// Pseudocode for the aforementioned OpenMP program generated by the
-/// compiler is as follows:
-///
-/// 1. Create private copies of reduction variables on each OpenMP
-/// thread: 'foo_private', 'bar_private'
-/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
-/// to it and writes the result in 'foo_private' and 'bar_private'
-/// respectively.
-/// 3. Call the OpenMP runtime on the GPU to reduce within a team
-/// and store the result on the team master:
-///
-/// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
-/// reduceData, shuffleReduceFn, interWarpCpyFn)
-///
-/// where:
-/// struct ReduceData {
-///       float *foo;
-/// double *bar;
-/// } reduceData
-/// reduceData.foo = &foo_private
-/// reduceData.bar = &bar_private
-///
-/// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
-/// auxiliary functions generated by the compiler that operate on
-/// variables of type 'ReduceData'. They help the runtime perform
-/// algorithmic steps in a data-agnostic manner.
-///
-/// 'shuffleReduceFn' is a pointer to a function that reduces data
-/// of type 'ReduceData' across two OpenMP threads (lanes) in the
-/// same warp. It takes the following arguments as input:
-///
-/// a. variable of type 'ReduceData' on the calling lane,
-/// b. its lane_id,
-/// c. an offset relative to the current lane_id to generate a
-/// remote_lane_id. The remote lane contains the second
-/// variable of type 'ReduceData' that is to be reduced.
-/// d. an algorithm version parameter determining which reduction
-/// algorithm to use.
-///
-/// 'shuffleReduceFn' retrieves data from the remote lane using
-/// efficient GPU shuffle intrinsics and reduces, using the
-/// algorithm specified by the 4th parameter, the two operands
-/// element-wise. The result is written to the first operand.
-///
-/// Different reduction algorithms are implemented in different
-/// runtime functions, all calling 'shuffleReduceFn' to perform
-/// the essential reduction step. Therefore, based on the 4th
-/// parameter, this function behaves slightly differently to
-/// cooperate with the runtime to ensure correctness under
-/// different circumstances.
-///
-/// 'InterWarpCpyFn' is a pointer to a function that transfers
-/// reduced variables across warps. It tunnels, through CUDA
-/// shared memory, the thread-private data of type 'ReduceData'
-/// from lane 0 of each warp to a lane in the first warp.
-/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
-/// The last team writes the global reduced value to memory.
-///
-/// ret = __kmpc_nvptx_teams_reduce_nowait(...,
-/// reduceData, shuffleReduceFn, interWarpCpyFn,
-/// scratchpadCopyFn, loadAndReduceFn)
-///
-/// 'scratchpadCopyFn' is a helper that stores reduced
-/// data from the team master to a scratchpad array in
-/// global memory.
-///
-/// 'loadAndReduceFn' is a helper that loads data from
-/// the scratchpad array and reduces it with the input
-/// operand.
-///
-/// These compiler generated functions hide address
-/// calculation and alignment information from the runtime.
-/// 5. if ret == 1:
-/// The team master of the last team stores the reduced
-/// result to the globals in memory.
-/// foo += reduceData.foo; bar *= reduceData.bar
-///
-///
-/// Warp Reduction Algorithms
-///
-/// On the warp level, we have three algorithms implemented in the
-/// OpenMP runtime depending on the number of active lanes:
-///
-/// Full Warp Reduction
-///
-/// The reduce algorithm within a warp where all lanes are active
-/// is implemented in the runtime as follows:
-///
-/// full_warp_reduce(void *reduce_data,
-/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
-/// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
-/// ShuffleReduceFn(reduce_data, 0, offset, 0);
-/// }
-///
-/// The algorithm completes in log(2, WARPSIZE) steps.
-///
-/// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
-/// not used; we therefore save instructions by not retrieving lane_id
-/// from the corresponding special registers. The 4th parameter, which
-/// represents the version of the algorithm being used, is set to 0 to
-/// signify full warp reduction.
-///
-/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
-///
-/// #reduce_elem refers to an element in the local lane's data structure
-/// #remote_elem is retrieved from a remote lane
-/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
-/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
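-///
-/// As an informal illustration (not part of the runtime), assume
-/// WARPSIZE = 32 and a '+' reduction over a single integer element; the
-/// loop then runs with offset = 16, 8, 4, 2, 1, and the updates that
-/// matter for the final result are:
-///
-///   offset 16: lane i adds lane i+16   (i = 0..15)
-///   offset  8: lane i adds lane i+8    (i = 0..7)
-///   offset  4: lane i adds lane i+4    (i = 0..3)
-///   offset  2: lane i adds lane i+2    (i = 0, 1)
-///   offset  1: lane 0 adds lane 1
-///
-/// so after log(2, 32) = 5 steps lane 0 holds the sum over all 32 lanes.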
-///
-/// Contiguous Partial Warp Reduction
-///
-/// This reduce algorithm is used within a warp where only the first
-/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
-/// number of OpenMP threads in a parallel region is not a multiple of
-/// WARPSIZE. The algorithm is implemented in the runtime as follows:
-///
-/// void
-/// contiguous_partial_reduce(void *reduce_data,
-/// kmp_ShuffleReductFctPtr ShuffleReduceFn,
-/// int size, int lane_id) {
-/// int curr_size;
-/// int offset;
-/// curr_size = size;
-///     offset = curr_size/2;
-/// while (offset>0) {
-/// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
-/// curr_size = (curr_size+1)/2;
-/// offset = curr_size/2;
-/// }
-/// }
-///
-/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
-///
-/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
-/// if (lane_id < offset)
-/// reduce_elem = reduce_elem REDUCE_OP remote_elem
-/// else
-/// reduce_elem = remote_elem
-///
-/// This algorithm assumes that the data to be reduced are located in a
-/// contiguous subset of lanes starting from the first. When there is
-/// an odd number of active lanes, the data in the last lane is not
-/// aggregated with any other lane's data but is instead copied over.
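-///
-/// As a worked example (illustrative only), let n = 5 active lanes hold
-/// values v0..v4 under a '+' reduction; the loop then runs with
-/// offset = 2, 1, 1:
-///
-///   offset 2: lane 0 = v0+v2, lane 1 = v1+v3, lane 2 copies v4
-///   offset 1: lane 0 = v0+v2+v1+v3, lane 1 copies v4
-///   offset 1: lane 0 = v0+v1+v2+v3+v4
-///
-/// leaving the full sum in the first lane after the final step.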
-///
-/// Dispersed Partial Warp Reduction
-///
-/// This algorithm is used within a warp when any discontiguous subset of
-/// lanes are active. It is used to implement the reduction operation
-/// across lanes in an OpenMP simd region or in a nested parallel region.
-///
-/// void
-/// dispersed_partial_reduce(void *reduce_data,
-/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
-/// int size, remote_id;
-/// int logical_lane_id = number_of_active_lanes_before_me() * 2;
-/// do {
-/// remote_id = next_active_lane_id_right_after_me();
-///     # the above function returns 0 if no active lane
-/// # is present right after the current lane.
-/// size = number_of_active_lanes_in_this_warp();
-/// logical_lane_id /= 2;
-/// ShuffleReduceFn(reduce_data, logical_lane_id,
-/// remote_id-1-threadIdx.x, 2);
-/// } while (logical_lane_id % 2 == 0 && size > 1);
-/// }
-///
-/// There is no assumption made about the initial state of the reduction.
-/// Any number of lanes (>=1) could be active at any position. The reduction
-/// result is returned in the first active lane.
-///
-/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
-///
-/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
-/// if (lane_id % 2 == 0 && offset > 0)
-/// reduce_elem = reduce_elem REDUCE_OP remote_elem
-/// else
-/// reduce_elem = remote_elem
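-///
-/// Informally, 'logical_lane_id' starts at twice a lane's rank among the
-/// active lanes, so each halving pairs adjacent active lanes: the even
-/// logical lane of a pair applies REDUCE_OP and stays in the loop, while
-/// the odd one merely hands its value over and drops out; repeated
-/// rounds therefore leave the result in the first active lane.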
-///
-///
-/// Intra-Team Reduction
-///
-/// This function, as implemented in the runtime call
-/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
-/// threads in a team. It first reduces within a warp using the
-/// aforementioned algorithms. We then proceed to gather all such
-/// reduced values at the first warp.
-///
-/// The runtime makes use of the function 'InterWarpCpyFn', which copies
-/// data from each of the "warp master" (zeroth lane of each warp, where
-/// warp-reduced data is held) to the zeroth warp. This step reduces (in
-/// a mathematical sense) the problem of reduction across warp masters in
-/// a block to the problem of warp reduction.
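-///
-/// A rough sketch of that copy (the generated code moves the data in
-/// 32-bit chunks): for each chunk of 'ReduceData', the master of warp w
-/// writes its value to shared-memory slot w, the block synchronizes, and
-/// lane w of the first warp reads slot w back, after which the first
-/// warp can run an ordinary warp reduction over the gathered values.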
-///
-///
-/// Inter-Team Reduction
-///
-/// Once a team has reduced its data to a single value, it is stored in
-/// a global scratchpad array. Since each team has a distinct slot, this
-/// can be done without locking.
-///
-/// The last team to write to the scratchpad array proceeds to reduce the
-/// scratchpad array. One or more workers in the last team use the helper
-/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
-/// the k'th worker reduces every k'th element.
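-///
-/// For example (illustrative), with 4 participating workers and 12 slots
-/// in the scratchpad array, worker k (k = 0..3) loads and reduces slots
-/// k, k+4, and k+8, collapsing the 12 partial results to 4 before the
-/// final reduction across workers.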
-///
-/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
-/// reduce across workers and compute a globally reduced value.
-///
-void CGOpenMPRuntimeNVPTX::emitReduction(
- CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
- if (!CGF.HaveInsertPoint())
- return;
-
- bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
-#ifndef NDEBUG
- bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
-#endif
-
- if (Options.SimpleReduction) {
- assert(!TeamsReduction && !ParallelReduction &&
- "Invalid reduction selection in emitReduction.");
- CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
- ReductionOps, Options);
- return;
- }
-
- assert((TeamsReduction || ParallelReduction) &&
- "Invalid reduction selection in emitReduction.");
-
- // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
- // RedList, shuffle_reduce_func, interwarp_copy_func);
- // or
- // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadId = getThreadID(CGF, Loc);
-
- llvm::Value *Res;
- ASTContext &C = CGM.getContext();
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- auto Size = RHSExprs.size();
- for (const Expr *E : Privates) {
- if (E->getType()->isVariablyModifiedType())
- // Reserve place for array size.
- ++Size;
- }
- llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy =
- C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- unsigned Idx = 0;
- for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
- Elem);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReductionList.getPointer(), CGF.VoidPtrTy);
- llvm::Function *ReductionFn = emitReductionFunction(
- Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
- LHSExprs, RHSExprs, ReductionOps);
- llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
- CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
- llvm::Value *InterWarpCopyFn =
- emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
-
- if (ParallelReduction) {
- llvm::Value *Args[] = {RTLoc,
- ThreadId,
- CGF.Builder.getInt32(RHSExprs.size()),
- ReductionArrayTySize,
- RL,
- ShuffleAndReduceFn,
- InterWarpCopyFn};
-
- Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
- Args);
- } else {
- assert(TeamsReduction && "expected teams reduction.");
- llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
- llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
- int Cnt = 0;
- for (const Expr *DRE : Privates) {
- PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
- ++Cnt;
- }
- const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
- CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
- C.getLangOpts().OpenMPCUDAReductionBufNum);
- TeamsReductions.push_back(TeamReductionRec);
- if (!KernelTeamsReductionPtr) {
- KernelTeamsReductionPtr = new llvm::GlobalVariable(
- CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, nullptr,
- "_openmp_teams_reductions_buffer_$_$ptr");
- }
- llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
- Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
- /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
- llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
- llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
- ReductionFn);
- llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
- llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
- ReductionFn);
-
- llvm::Value *Args[] = {
- RTLoc,
- ThreadId,
- GlobalBufferPtr,
- CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
- RL,
- ShuffleAndReduceFn,
- InterWarpCopyFn,
- GlobalToBufferCpyFn,
- GlobalToBufferRedFn,
- BufferToGlobalCpyFn,
- BufferToGlobalRedFn};
-
- Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
- Args);
- }
-
- // 5. Build if (res == 1)
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
- llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
- Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
- CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
-
- // 6. Build then branch: where we have reduced values in the master
- // thread in each team.
- // __kmpc_end_reduce{_nowait}(<gtid>);
- // break;
- CGF.EmitBlock(ThenBB);
-
- // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
- auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
- this](CodeGenFunction &CGF, PrePostActionTy &Action) {
- auto IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
- for (const Expr *E : ReductionOps) {
- emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
- cast<DeclRefExpr>(*IRHS));
- ++IPriv;
- ++ILHS;
- ++IRHS;
- }
- };
- llvm::Value *EndArgs[] = {ThreadId};
- RegionCodeGenTy RCG(CodeGen);
- NVPTXActionTy Action(
- nullptr, llvm::None,
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
- EndArgs);
- RCG.setAction(Action);
- RCG(CGF);
-  // There is no need to emit a line number for the unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
-}
-
-const VarDecl *
-CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
- const VarDecl *NativeParam) const {
- if (!NativeParam->getType()->isReferenceType())
- return NativeParam;
- QualType ArgType = NativeParam->getType();
- QualifierCollector QC;
- const Type *NonQualTy = QC.strip(ArgType);
- QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
- if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
- if (Attr->getCaptureKind() == OMPC_map) {
- PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
- LangAS::opencl_global);
- } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
- PointeeTy.isConstant(CGM.getContext())) {
- PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
- LangAS::opencl_generic);
- }
- }
- ArgType = CGM.getContext().getPointerType(PointeeTy);
- QC.addRestrict();
- enum { NVPTX_local_addr = 5 };
- QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
- ArgType = QC.apply(CGM.getContext(), ArgType);
- if (isa<ImplicitParamDecl>(NativeParam))
- return ImplicitParamDecl::Create(
- CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
- NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
- return ParmVarDecl::Create(
- CGM.getContext(),
- const_cast<DeclContext *>(NativeParam->getDeclContext()),
- NativeParam->getBeginLoc(), NativeParam->getLocation(),
- NativeParam->getIdentifier(), ArgType,
- /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
-}
-
-Address
-CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
- const VarDecl *NativeParam,
- const VarDecl *TargetParam) const {
- assert(NativeParam != TargetParam &&
- NativeParam->getType()->isReferenceType() &&
- "Native arg must not be the same as target arg.");
- Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
- QualType NativeParamType = NativeParam->getType();
- QualifierCollector QC;
- const Type *NonQualTy = QC.strip(NativeParamType);
- QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
- unsigned NativePointeeAddrSpace =
- CGF.getContext().getTargetAddressSpace(NativePointeeTy);
- QualType TargetTy = TargetParam->getType();
- llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
- LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
- // First cast to generic.
- TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- /*AddrSpace=*/0));
- // Cast from generic to native address space.
- TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- NativePointeeAddrSpace));
- Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
- CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
- NativeParamType);
- return NativeParamAddr;
-}
-
-void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
- ArrayRef<llvm::Value *> Args) const {
- SmallVector<llvm::Value *, 4> TargetArgs;
- TargetArgs.reserve(Args.size());
- auto *FnType = OutlinedFn.getFunctionType();
- for (unsigned I = 0, E = Args.size(); I < E; ++I) {
- if (FnType->isVarArg() && FnType->getNumParams() <= I) {
- TargetArgs.append(std::next(Args.begin(), I), Args.end());
- break;
- }
- llvm::Type *TargetType = FnType->getParamType(I);
- llvm::Value *NativeArg = Args[I];
- if (!TargetType->isPointerTy()) {
- TargetArgs.emplace_back(NativeArg);
- continue;
- }
- llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- NativeArg,
- NativeArg->getType()->getPointerElementType()->getPointerTo());
- TargetArgs.emplace_back(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
- }
- CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
-}
-
-/// Emit function which wraps the outline parallel region
-/// and controls the arguments which are passed to this function.
-/// The wrapper ensures that the outlined function is called
-/// with the correct arguments when data is shared.
-llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
- llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
- ASTContext &Ctx = CGM.getContext();
- const auto &CS = *D.getCapturedStmt(OMPD_parallel);
-
- // Create a function that takes as argument the source thread.
- FunctionArgList WrapperArgs;
- QualType Int16QTy =
- Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
- QualType Int32QTy =
- Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
- ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
- /*Id=*/nullptr, Int16QTy,
- ImplicitParamDecl::Other);
- ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
- /*Id=*/nullptr, Int32QTy,
- ImplicitParamDecl::Other);
- WrapperArgs.emplace_back(&ParallelLevelArg);
- WrapperArgs.emplace_back(&WrapperArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
-
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
- Fn->setDoesNotRecurse();
-
- CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
- CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
- D.getBeginLoc(), D.getBeginLoc());
-
- const auto *RD = CS.getCapturedRecordDecl();
- auto CurField = RD->field_begin();
-
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- // Get the array of arguments.
- SmallVector<llvm::Value *, 8> Args;
-
- Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
- Args.emplace_back(ZeroAddr.getPointer());
-
- CGBuilderTy &Bld = CGF.Builder;
- auto CI = CS.capture_begin();
-
- // Use global memory for data sharing.
- // Handle passing of global args to workers.
- Address GlobalArgs =
- CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
- llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
- llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
- DataSharingArgs);
-
- // Retrieve the shared variables from the list of references returned
- // by the runtime. Pass the variables to the outlined function.
- Address SharedArgListAddress = Address::invalid();
- if (CS.capture_size() > 0 ||
- isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
- SharedArgListAddress = CGF.EmitLoadOfPointer(
- GlobalArgs, CGF.getContext()
- .getPointerType(CGF.getContext().getPointerType(
- CGF.getContext().VoidPtrTy))
- .castAs<PointerType>());
- }
- unsigned Idx = 0;
- if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
- Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
- Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.SizeTy->getPointerTo());
- llvm::Value *LB = CGF.EmitLoadOfScalar(
- TypedAddress,
- /*Volatile=*/false,
- CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
- cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
- Args.emplace_back(LB);
- ++Idx;
- Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
- TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.SizeTy->getPointerTo());
- llvm::Value *UB = CGF.EmitLoadOfScalar(
- TypedAddress,
- /*Volatile=*/false,
- CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
- cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
- Args.emplace_back(UB);
- ++Idx;
- }
- if (CS.capture_size() > 0) {
- ASTContext &CGFContext = CGF.getContext();
- for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
- QualType ElemTy = CurField->getType();
- Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
- Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
- llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
- /*Volatile=*/false,
- CGFContext.getPointerType(ElemTy),
- CI->getLocation());
- if (CI->capturesVariableByCopy() &&
- !CI->getCapturedVar()->getType()->isAnyPointerType()) {
- Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
- CI->getLocation());
- }
- Args.emplace_back(Arg);
- }
- }
-
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
- CGF.FinishFunction();
- return Fn;
-}
-
-void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
- const Decl *D) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
- return;
-
- assert(D && "Expected function or captured|block decl.");
- assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
- "Function is registered already.");
- assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
- "Team is set but not processed.");
- const Stmt *Body = nullptr;
- bool NeedToDelayGlobalization = false;
- if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- Body = FD->getBody();
- } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
- Body = BD->getBody();
- } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
- Body = CD->getBody();
- NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
- if (NeedToDelayGlobalization &&
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
- return;
- }
- if (!Body)
- return;
- CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
- VarChecker.Visit(Body);
- const RecordDecl *GlobalizedVarsRecord =
- VarChecker.getGlobalizedRecord(IsInTTDRegion);
- TeamAndReductions.first = nullptr;
- TeamAndReductions.second.clear();
- ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
- VarChecker.getEscapedVariableLengthDecls();
- if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
- return;
- auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
- I->getSecond().MappedParams =
- std::make_unique<CodeGenFunction::OMPMapVars>();
- I->getSecond().GlobalRecord = GlobalizedVarsRecord;
- I->getSecond().EscapedParameters.insert(
- VarChecker.getEscapedParameters().begin(),
- VarChecker.getEscapedParameters().end());
- I->getSecond().EscapedVariableLengthDecls.append(
- EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
- DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
- for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
- assert(VD->isCanonicalDecl() && "Expected canonical declaration");
- const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
- Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
- }
- if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
- CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
- VarChecker.Visit(Body);
- I->getSecond().SecondaryGlobalRecord =
- VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
- I->getSecond().SecondaryLocalVarData.emplace();
- DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
- for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
- assert(VD->isCanonicalDecl() && "Expected canonical declaration");
- const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
- Data.insert(
- std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
- }
- }
- if (!NeedToDelayGlobalization) {
- emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
- struct GlobalizationScope final : EHScopeStack::Cleanup {
- GlobalizationScope() = default;
-
- void Emit(CodeGenFunction &CGF, Flags flags) override {
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
- }
- };
- CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
- }
-}
-
-Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
- const VarDecl *VD) {
- if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
- const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
- switch (A->getAllocatorType()) {
- // Use the default allocator here as by default local vars are
- // threadlocal.
- case OMPAllocateDeclAttr::OMPNullMemAlloc:
- case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
- case OMPAllocateDeclAttr::OMPThreadMemAlloc:
- case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
- case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
- // Follow the user decision - use default allocation.
- return Address::invalid();
- case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
-      // TODO: implement support for user-defined allocators.
- return Address::invalid();
- case OMPAllocateDeclAttr::OMPConstMemAlloc: {
- llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), VarTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(VarTy), VD->getName(),
- /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant));
- CharUnits Align = CGM.getContext().getDeclAlign(VD);
- GV->setAlignment(Align.getAsAlign());
- return Address(GV, Align);
- }
- case OMPAllocateDeclAttr::OMPPTeamMemAlloc: {
- llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), VarTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(VarTy), VD->getName(),
- /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
- CharUnits Align = CGM.getContext().getDeclAlign(VD);
- GV->setAlignment(Align.getAsAlign());
- return Address(GV, Align);
- }
- case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
- case OMPAllocateDeclAttr::OMPCGroupMemAlloc: {
- llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), VarTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(VarTy), VD->getName());
- CharUnits Align = CGM.getContext().getDeclAlign(VD);
- GV->setAlignment(Align.getAsAlign());
- return Address(GV, Align);
- }
- }
- }
-
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
- return Address::invalid();
-
- VD = VD->getCanonicalDecl();
- auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
- if (I == FunctionGlobalizedDecls.end())
- return Address::invalid();
- auto VDI = I->getSecond().LocalVarData.find(VD);
- if (VDI != I->getSecond().LocalVarData.end())
- return VDI->second.PrivateAddr;
- if (VD->hasAttrs()) {
- for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
- E(VD->attr_end());
- IT != E; ++IT) {
- auto VDI = I->getSecond().LocalVarData.find(
- cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
- ->getCanonicalDecl());
- if (VDI != I->getSecond().LocalVarData.end())
- return VDI->second.PrivateAddr;
- }
- }
-
- return Address::invalid();
-}
-
-void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
- FunctionGlobalizedDecls.erase(CGF.CurFn);
- CGOpenMPRuntime::functionFinished(CGF);
-}
-
-void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
- CodeGenFunction &CGF, const OMPLoopDirective &S,
- OpenMPDistScheduleClauseKind &ScheduleKind,
- llvm::Value *&Chunk) const {
- if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
- ScheduleKind = OMPC_DIST_SCHEDULE_static;
- Chunk = CGF.EmitScalarConversion(getNVPTXNumThreads(CGF),
- CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
- S.getIterationVariable()->getType(), S.getBeginLoc());
- return;
- }
- CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
- CGF, S, ScheduleKind, Chunk);
-}
-
-void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
- CodeGenFunction &CGF, const OMPLoopDirective &S,
- OpenMPScheduleClauseKind &ScheduleKind,
- const Expr *&ChunkExpr) const {
- ScheduleKind = OMPC_SCHEDULE_static;
- // Chunk size is 1 in this case.
- llvm::APInt ChunkSize(32, 1);
- ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
- CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
- SourceLocation());
-}
-
-void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
- CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
-  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
-         "Expected target-based directive.");
- const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
- for (const CapturedStmt::Capture &C : CS->captures()) {
- // Capture variables captured by reference in lambdas for target-based
- // directives.
- if (!C.capturesVariable())
- continue;
- const VarDecl *VD = C.getCapturedVar();
- const auto *RD = VD->getType()
- .getCanonicalType()
- .getNonReferenceType()
- ->getAsCXXRecordDecl();
- if (!RD || !RD->isLambda())
- continue;
- Address VDAddr = CGF.GetAddrOfLocalVar(VD);
- LValue VDLVal;
- if (VD->getType().getCanonicalType()->isReferenceType())
- VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
- else
- VDLVal = CGF.MakeAddrLValue(
- VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
- llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
- FieldDecl *ThisCapture = nullptr;
- RD->getCaptureFields(Captures, ThisCapture);
- if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
- LValue ThisLVal =
- CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
- llvm::Value *CXXThis = CGF.LoadCXXThis();
- CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
- }
- for (const LambdaCapture &LC : RD->captures()) {
- if (LC.getCaptureKind() != LCK_ByRef)
- continue;
- const VarDecl *VD = LC.getCapturedVar();
- if (!CS->capturesVariable(VD))
- continue;
- auto It = Captures.find(VD);
- assert(It != Captures.end() && "Found lambda capture without field.");
- LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
- Address VDAddr = CGF.GetAddrOfLocalVar(VD);
- if (VD->getType().getCanonicalType()->isReferenceType())
- VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
- VD->getType().getCanonicalType())
- .getAddress(CGF);
- CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
- }
- }
-}
-
-unsigned CGOpenMPRuntimeNVPTX::getDefaultFirstprivateAddressSpace() const {
- return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
-}
-
-bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
- LangAS &AS) {
- if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
- return false;
- const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
- switch(A->getAllocatorType()) {
- case OMPAllocateDeclAttr::OMPNullMemAlloc:
- case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
- // Not supported, fallback to the default mem space.
- case OMPAllocateDeclAttr::OMPThreadMemAlloc:
- case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
- case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
- case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
- case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
- AS = LangAS::Default;
- return true;
- case OMPAllocateDeclAttr::OMPConstMemAlloc:
- AS = LangAS::cuda_constant;
- return true;
- case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
- AS = LangAS::cuda_shared;
- return true;
- case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
- llvm_unreachable("Expected predefined allocator for the variables with the "
- "static storage.");
- }
- return false;
-}
-
-// Get current CudaArch and ignore any unknown values
-static CudaArch getCudaArch(CodeGenModule &CGM) {
- if (!CGM.getTarget().hasFeature("ptx"))
- return CudaArch::UNKNOWN;
- llvm::StringMap<bool> Features;
- CGM.getTarget().initFeatureMap(Features, CGM.getDiags(),
- CGM.getTarget().getTargetOpts().CPU,
- CGM.getTarget().getTargetOpts().Features);
- for (const auto &Feature : Features) {
- if (Feature.getValue()) {
- CudaArch Arch = StringToCudaArch(Feature.getKey());
- if (Arch != CudaArch::UNKNOWN)
- return Arch;
- }
- }
- return CudaArch::UNKNOWN;
-}
-
-/// Check whether the target architecture supports unified addressing, which
-/// is required by the OpenMP 'requires' clause "unified_shared_memory".
-void CGOpenMPRuntimeNVPTX::processRequiresDirective(
- const OMPRequiresDecl *D) {
- for (const OMPClause *Clause : D->clauselists()) {
- if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
- CudaArch Arch = getCudaArch(CGM);
- switch (Arch) {
- case CudaArch::SM_20:
- case CudaArch::SM_21:
- case CudaArch::SM_30:
- case CudaArch::SM_32:
- case CudaArch::SM_35:
- case CudaArch::SM_37:
- case CudaArch::SM_50:
- case CudaArch::SM_52:
- case CudaArch::SM_53:
- case CudaArch::SM_60:
- case CudaArch::SM_61:
- case CudaArch::SM_62: {
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- Out << "Target architecture " << CudaArchToString(Arch)
- << " does not support unified addressing";
- CGM.Error(Clause->getBeginLoc(), Out.str());
- return;
- }
- case CudaArch::SM_70:
- case CudaArch::SM_72:
- case CudaArch::SM_75:
- case CudaArch::SM_80:
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- case CudaArch::GFX1030:
- case CudaArch::UNKNOWN:
- break;
- case CudaArch::LAST:
- llvm_unreachable("Unexpected Cuda arch.");
- }
- }
- }
- CGOpenMPRuntime::processRequiresDirective(D);
-}
-
-/// Get number of SMs and number of blocks per SM.
-static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
- std::pair<unsigned, unsigned> Data;
- if (CGM.getLangOpts().OpenMPCUDANumSMs)
- Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
- if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
- Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
- if (Data.first && Data.second)
- return Data;
- switch (getCudaArch(CGM)) {
- case CudaArch::SM_20:
- case CudaArch::SM_21:
- case CudaArch::SM_30:
- case CudaArch::SM_32:
- case CudaArch::SM_35:
- case CudaArch::SM_37:
- case CudaArch::SM_50:
- case CudaArch::SM_52:
- case CudaArch::SM_53:
- return {16, 16};
- case CudaArch::SM_60:
- case CudaArch::SM_61:
- case CudaArch::SM_62:
- return {56, 32};
- case CudaArch::SM_70:
- case CudaArch::SM_72:
- case CudaArch::SM_75:
- case CudaArch::SM_80:
- return {84, 32};
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- case CudaArch::GFX1030:
- case CudaArch::UNKNOWN:
- break;
- case CudaArch::LAST:
- llvm_unreachable("Unexpected Cuda arch.");
- }
- llvm_unreachable("Unexpected NVPTX target without ptx feature.");
-}
-
-void CGOpenMPRuntimeNVPTX::clear() {
- if (!GlobalizedRecords.empty() &&
- !CGM.getLangOpts().OpenMPCUDATargetParallel) {
- ASTContext &C = CGM.getContext();
- llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
- llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
- RecordDecl *StaticRD = C.buildImplicitRecord(
- "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
- StaticRD->startDefinition();
- RecordDecl *SharedStaticRD = C.buildImplicitRecord(
- "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
- SharedStaticRD->startDefinition();
- for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
- if (Records.Records.empty())
- continue;
- unsigned Size = 0;
- unsigned RecAlignment = 0;
- for (const RecordDecl *RD : Records.Records) {
- QualType RDTy = C.getRecordType(RD);
- unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
- RecAlignment = std::max(RecAlignment, Alignment);
- unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
- Size =
- llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
- }
- Size = llvm::alignTo(Size, RecAlignment);
- llvm::APInt ArySize(/*numBits=*/64, Size);
- QualType SubTy = C.getConstantArrayType(
- C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
- const bool UseSharedMemory = Size <= SharedMemorySize;
- auto *Field =
- FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
- SourceLocation(), SourceLocation(), nullptr, SubTy,
- C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- if (UseSharedMemory) {
- SharedStaticRD->addDecl(Field);
- SharedRecs.push_back(&Records);
- } else {
- StaticRD->addDecl(Field);
- GlobalRecs.push_back(&Records);
- }
- Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
- Records.UseSharedMemory->setInitializer(
- llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
- }
-  // Allocate a buffer of SharedMemorySize bytes in shared memory.
-  // FIXME: nvlink does not handle weak linkage correctly (objects with
-  // different sizes are reported as erroneous).
-  // Restore this code as soon as nvlink is fixed.
- if (!SharedStaticRD->field_empty()) {
- llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
- QualType SubTy = C.getConstantArrayType(
- C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
- auto *Field = FieldDecl::Create(
- C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
- C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- SharedStaticRD->addDecl(Field);
- }
- SharedStaticRD->completeDefinition();
- if (!SharedStaticRD->field_empty()) {
- QualType StaticTy = C.getRecordType(SharedStaticRD);
- llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), LLVMStaticTy,
- /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
- llvm::Constant::getNullValue(LLVMStaticTy),
- "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
- llvm::GlobalValue::NotThreadLocal,
- C.getTargetAddressSpace(LangAS::cuda_shared));
- auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
- GV, CGM.VoidPtrTy);
- for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
- Rec->Buffer->replaceAllUsesWith(Replacement);
- Rec->Buffer->eraseFromParent();
- }
- }
- StaticRD->completeDefinition();
- if (!StaticRD->field_empty()) {
- QualType StaticTy = C.getRecordType(StaticRD);
- std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
- llvm::APInt Size1(32, SMsBlockPerSM.second);
- QualType Arr1Ty =
- C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- llvm::APInt Size2(32, SMsBlockPerSM.first);
- QualType Arr2Ty =
- C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
-    // FIXME: nvlink does not handle weak linkage correctly (objects with
-    // different sizes are reported as erroneous).
- // Restore CommonLinkage as soon as nvlink is fixed.
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), LLVMArr2Ty,
- /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(LLVMArr2Ty),
- "_openmp_static_glob_rd_$_");
- auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
- GV, CGM.VoidPtrTy);
- for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
- Rec->Buffer->replaceAllUsesWith(Replacement);
- Rec->Buffer->eraseFromParent();
- }
- }
- }
- if (!TeamsReductions.empty()) {
- ASTContext &C = CGM.getContext();
- RecordDecl *StaticRD = C.buildImplicitRecord(
- "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
- StaticRD->startDefinition();
- for (const RecordDecl *TeamReductionRec : TeamsReductions) {
- QualType RecTy = C.getRecordType(TeamReductionRec);
- auto *Field = FieldDecl::Create(
- C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
- C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- StaticRD->addDecl(Field);
- }
- StaticRD->completeDefinition();
- QualType StaticTy = C.getRecordType(StaticRD);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
-    // FIXME: nvlink does not handle weak linkage correctly (objects with
-    // different sizes are reported as erroneous).
- // Restore CommonLinkage as soon as nvlink is fixed.
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), LLVMReductionsBufferTy,
- /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(LLVMReductionsBufferTy),
- "_openmp_teams_reductions_buffer_$_");
- KernelTeamsReductionPtr->setInitializer(
- llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
- CGM.VoidPtrTy));
- }
- CGOpenMPRuntime::clear();
+  // Read the PTX special register %ntid.x, i.e. the number of threads in
+  // the current CTA (thread block).
+  llvm::Function *F = llvm::Intrinsic::getDeclaration(
+      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x);
+  return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index c52ae43817c7..5f1602959266 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
-// targets.
+// targets, derived from the generalized CGOpenMPRuntimeGPU class.
//
//===----------------------------------------------------------------------===//
@@ -15,473 +15,26 @@
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMENVPTX_H
#include "CGOpenMPRuntime.h"
+#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "clang/AST/StmtOpenMP.h"
namespace clang {
namespace CodeGen {
-class CGOpenMPRuntimeNVPTX : public CGOpenMPRuntime {
-public:
- /// Defines the execution mode.
- enum ExecutionMode {
- /// SPMD execution mode (all threads are worker threads).
- EM_SPMD,
- /// Non-SPMD execution mode (1 master thread, others are workers).
- EM_NonSPMD,
- /// Unknown execution mode (orphaned directive).
- EM_Unknown,
- };
-private:
- /// Parallel outlined function work for workers to execute.
- llvm::SmallVector<llvm::Function *, 16> Work;
-
- struct EntryFunctionState {
- llvm::BasicBlock *ExitBB = nullptr;
- };
-
- class WorkerFunctionState {
- public:
- llvm::Function *WorkerFn;
- const CGFunctionInfo &CGFI;
- SourceLocation Loc;
-
- WorkerFunctionState(CodeGenModule &CGM, SourceLocation Loc);
-
- private:
- void createWorkerFunction(CodeGenModule &CGM);
- };
-
- ExecutionMode getExecutionMode() const;
-
- bool requiresFullRuntime() const { return RequiresFullRuntime; }
-
- /// Get barrier to synchronize all threads in a block.
- void syncCTAThreads(CodeGenFunction &CGF);
-
- /// Emit the worker function for the current target region.
- void emitWorkerFunction(WorkerFunctionState &WST);
-
- /// Helper for worker function. Emit body of worker loop.
- void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST);
-
- /// Helper for non-SPMD target entry function. Guide the master and
- /// worker threads to their respective locations.
- void emitNonSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
- WorkerFunctionState &WST);
-
- /// Signal termination of OMP execution for non-SPMD target entry
- /// function.
- void emitNonSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
-
- /// Helper for generic variables globalization prolog.
- void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc,
- bool WithSPMDCheck = false);
-
- /// Helper for generic variables globalization epilog.
- void emitGenericVarsEpilog(CodeGenFunction &CGF, bool WithSPMDCheck = false);
-
- /// Helper for SPMD mode target directive's entry function.
- void emitSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
- const OMPExecutableDirective &D);
-
- /// Signal termination of SPMD mode execution.
- void emitSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
-
- //
- // Base class overrides.
- //
-
- /// Creates offloading entry for the provided entry ID \a ID,
- /// address \a Addr, size \a Size, and flags \a Flags.
- void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
- uint64_t Size, int32_t Flags,
- llvm::GlobalValue::LinkageTypes Linkage) override;
-
- /// Emit outlined function specialized for the Fork-Join
- /// programming model for applicable target directives on the NVPTX device.
- /// \param D Directive to emit.
- /// \param ParentName Name of the function that encloses the target region.
- /// \param OutlinedFn Outlined function value to be defined by this call.
- /// \param OutlinedFnID Outlined function ID value to be defined by this call.
- /// \param IsOffloadEntry True if the outlined function is an offload entry.
- /// An outlined function may not be an entry if, e.g. the if clause always
- /// evaluates to false.
- void emitNonSPMDKernel(const OMPExecutableDirective &D, StringRef ParentName,
- llvm::Function *&OutlinedFn,
- llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
- const RegionCodeGenTy &CodeGen);
-
- /// Emit outlined function specialized for the Single Program
- /// Multiple Data programming model for applicable target directives on the
- /// NVPTX device.
- /// \param D Directive to emit.
- /// \param ParentName Name of the function that encloses the target region.
- /// \param OutlinedFn Outlined function value to be defined by this call.
- /// \param OutlinedFnID Outlined function ID value to be defined by this call.
- /// \param IsOffloadEntry True if the outlined function is an offload entry.
- /// \param CodeGen Object containing the target statements.
- /// An outlined function may not be an entry if, e.g. the if clause always
- /// evaluates to false.
- void emitSPMDKernel(const OMPExecutableDirective &D, StringRef ParentName,
- llvm::Function *&OutlinedFn,
- llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
- const RegionCodeGenTy &CodeGen);
-
- /// Emit outlined function for 'target' directive on the NVPTX
- /// device.
- /// \param D Directive to emit.
- /// \param ParentName Name of the function that encloses the target region.
- /// \param OutlinedFn Outlined function value to be defined by this call.
- /// \param OutlinedFnID Outlined function ID value to be defined by this call.
- /// \param IsOffloadEntry True if the outlined function is an offload entry.
- /// An outlined function may not be an entry if, e.g. the if clause always
- /// evaluates to false.
- void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
- StringRef ParentName,
- llvm::Function *&OutlinedFn,
- llvm::Constant *&OutlinedFnID,
- bool IsOffloadEntry,
- const RegionCodeGenTy &CodeGen) override;
-
- /// Emits code for parallel or serial call of the \a OutlinedFn with
-  /// variables captured in a record whose address is stored in \a
- /// CapturedStruct.
- /// This call is for the Non-SPMD Execution Mode.
- /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- /// \param IfCond Condition in the associated 'if' clause, if it was
- /// specified, nullptr otherwise.
- void emitNonSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
-
- /// Emits code for parallel or serial call of the \a OutlinedFn with
-  /// variables captured in a record whose address is stored in \a
- /// CapturedStruct.
- /// This call is for a parallel directive within an SPMD target directive.
- /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- /// \param IfCond Condition in the associated 'if' clause, if it was
- /// specified, nullptr otherwise.
- ///
- void emitSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
-
-protected:
- /// Get the function name of an outlined region.
- // The name can be customized depending on the target.
- //
- StringRef getOutlinedHelperName() const override {
- return "__omp_outlined__";
- }
-
- /// Check if the default location must be constant.
- /// Constant for NVPTX for better optimization.
- bool isDefaultLocationConstant() const override { return true; }
-
- /// Returns additional flags that can be stored in reserved_2 field of the
- /// default location.
- /// For NVPTX target contains data about SPMD/Non-SPMD execution mode +
- /// Full/Lightweight runtime mode. Used for better optimization.
- unsigned getDefaultLocationReserved2Flags() const override;
+class CGOpenMPRuntimeNVPTX final : public CGOpenMPRuntimeGPU {
public:
explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
- void clear() override;
-
- /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
- /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
- virtual void emitProcBindClause(CodeGenFunction &CGF,
- llvm::omp::ProcBindKind ProcBind,
- SourceLocation Loc) override;
-
- /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
- /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
- /// clause.
- /// \param NumThreads An integer value of threads.
- virtual void emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc) override;
-
- /// This function ought to emit, in the general case, a call to
- // the openmp runtime kmpc_push_num_teams. In NVPTX backend it is not needed
- // as these numbers are obtained through the PTX grid and block configuration.
- /// \param NumTeams An integer expression of teams.
- /// \param ThreadLimit An integer expression of threads.
- void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
- const Expr *ThreadLimit, SourceLocation Loc) override;
-
- /// Emits inlined function for the specified OpenMP parallel
- // directive.
- /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
- /// kmp_int32 BoundID, struct context_vars*).
- /// \param D OpenMP directive.
- /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
- /// \param InnermostKind Kind of innermost directive (for simple directives it
- /// is a directive itself, for combined - its innermost directive).
- /// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitParallelOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
-
- /// Emits inlined function for the specified OpenMP teams
- // directive.
- /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
- /// kmp_int32 BoundID, struct context_vars*).
- /// \param D OpenMP directive.
- /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
- /// \param InnermostKind Kind of innermost directive (for simple directives it
- /// is a directive itself, for combined - its innermost directive).
- /// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
-
- /// Emits code for teams call of the \a OutlinedFn with
- /// variables captured in a record which address is stored in \a
- /// CapturedStruct.
- /// \param OutlinedFn Outlined function to be run by team masters. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- ///
- void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- SourceLocation Loc, llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars) override;
-
- /// Emits code for parallel or serial call of the \a OutlinedFn with
- /// variables captured in a record which address is stored in \a
- /// CapturedStruct.
- /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- /// \param IfCond Condition in the associated 'if' clause, if it was
- /// specified, nullptr otherwise.
- void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) override;
-
- /// Emit an implicit/explicit barrier for OpenMP threads.
- /// \param Kind Directive for which this implicit barrier call must be
- /// generated. Must be OMPD_barrier for explicit barrier generation.
- /// \param EmitChecks true if need to emit checks for cancellation barriers.
- /// \param ForceSimpleCall true simple barrier call must be emitted, false if
- /// runtime class decides which one to emit (simple or with cancellation
- /// checks).
- ///
- void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPDirectiveKind Kind, bool EmitChecks = true,
- bool ForceSimpleCall = false) override;
-
- /// Emits a critical region.
- /// \param CriticalName Name of the critical region.
- /// \param CriticalOpGen Generator for the statement associated with the given
- /// critical region.
- /// \param Hint Value of the 'hint' clause (optional).
- void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
- const RegionCodeGenTy &CriticalOpGen,
- SourceLocation Loc,
- const Expr *Hint = nullptr) override;
-
- /// Emit a code for reduction clause.
- ///
- /// \param Privates List of private copies for original reduction arguments.
- /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
- /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
- /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
- /// or 'operator binop(LHS, RHS)'.
- /// \param Options List of options for reduction codegen:
- /// WithNowait true if parent directive has also nowait clause, false
- /// otherwise.
- /// SimpleReduction Emit reduction operation only. Used for omp simd
- /// directive on the host.
- /// ReductionKind The kind of reduction to perform.
- virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps,
- ReductionOptionsTy Options) override;
-
- /// Returns specified OpenMP runtime function for the current OpenMP
- /// implementation. Specialized for the NVPTX device.
- /// \param Function OpenMP runtime function.
- /// \return Specified function.
- llvm::FunctionCallee createNVPTXRuntimeFunction(unsigned Function);
-
- /// Translates the native parameter of outlined function if this is required
- /// for target.
- /// \param FD Field decl from captured record for the parameter.
- /// \param NativeParam Parameter itself.
- const VarDecl *translateParameter(const FieldDecl *FD,
- const VarDecl *NativeParam) const override;
-
- /// Gets the address of the native argument basing on the address of the
- /// target-specific parameter.
- /// \param NativeParam Parameter itself.
- /// \param TargetParam Corresponding target-specific parameter.
- Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
- const VarDecl *TargetParam) const override;
-
- /// Emits call of the outlined function with the provided arguments,
- /// translating these arguments to correct target-specific arguments.
- void emitOutlinedFunctionCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
- ArrayRef<llvm::Value *> Args = llvm::None) const override;
-
- /// Emits OpenMP-specific function prolog.
- /// Required for device constructs.
- void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) override;
-
- /// Gets the OpenMP-specific address of the local variable.
- Address getAddressOfLocalVariable(CodeGenFunction &CGF,
- const VarDecl *VD) override;
-
- /// Target codegen is specialized based on two data-sharing modes: CUDA, in
- /// which the local variables are actually global threadlocal, and Generic, in
- /// which the local variables are placed in global memory if they may escape
- /// their declaration context.
- enum DataSharingMode {
- /// CUDA data sharing mode.
- CUDA,
- /// Generic data-sharing mode.
- Generic,
- };
-
- /// Cleans up references to the objects in finished function.
- ///
- void functionFinished(CodeGenFunction &CGF) override;
-
- /// Choose a default value for the dist_schedule clause.
- void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
- const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
- llvm::Value *&Chunk) const override;
-
- /// Choose a default value for the schedule clause.
- void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
- const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
- const Expr *&ChunkExpr) const override;
-
- /// Adjust some parameters for the target-based directives, like addresses of
- /// the variables captured by reference in lambdas.
- void adjustTargetSpecificDataForLambdas(
- CodeGenFunction &CGF, const OMPExecutableDirective &D) const override;
-
- /// Perform check on requires decl to ensure that target architecture
- /// supports unified addressing
- void processRequiresDirective(const OMPRequiresDecl *D) override;
-
- /// Returns default address space for the constant firstprivates, __constant__
- /// address space by default.
- unsigned getDefaultFirstprivateAddressSpace() const override;
-
- /// Checks if the variable has associated OMPAllocateDeclAttr attribute with
- /// the predefined allocator and translates it into the corresponding address
- /// space.
- bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS) override;
-
-private:
- /// Track the execution mode when codegening directives within a target
- /// region. The appropriate mode (SPMD/NON-SPMD) is set on entry to the
- /// target region and used by containing directives such as 'parallel'
- /// to emit optimized code.
- ExecutionMode CurrentExecutionMode = EM_Unknown;
-
- /// Check if the full runtime is required (default - yes).
- bool RequiresFullRuntime = true;
-
- /// true if we're emitting the code for the target region and next parallel
- /// region is L0 for sure.
- bool IsInTargetMasterThreadRegion = false;
- /// true if currently emitting code for target/teams/distribute region, false
- /// - otherwise.
- bool IsInTTDRegion = false;
- /// true if we're definitely in the parallel region.
- bool IsInParallelRegion = false;
- /// Map between an outlined function and its wrapper.
- llvm::DenseMap<llvm::Function *, llvm::Function *> WrapperFunctionsMap;
+ /// Get the GPU warp size.
+ llvm::Value *getGPUWarpSize(CodeGenFunction &CGF) override;
- /// Emit function which wraps the outline parallel region
- /// and controls the parameters which are passed to this function.
- /// The wrapper ensures that the outlined function is called
- /// with the correct arguments when data is shared.
- llvm::Function *createParallelDataSharingWrapper(
- llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D);
+ /// Get the id of the current thread on the GPU.
+ llvm::Value *getGPUThreadID(CodeGenFunction &CGF) override;
- /// The data for the single globalized variable.
- struct MappedVarData {
- /// Corresponding field in the global record.
- const FieldDecl *FD = nullptr;
- /// Corresponding address.
- Address PrivateAddr = Address::invalid();
- /// true, if only one element is required (for latprivates in SPMD mode),
- /// false, if need to create based on the warp-size.
- bool IsOnePerTeam = false;
- MappedVarData() = delete;
- MappedVarData(const FieldDecl *FD, bool IsOnePerTeam = false)
- : FD(FD), IsOnePerTeam(IsOnePerTeam) {}
- };
- /// The map of local variables to their addresses in the global memory.
- using DeclToAddrMapTy = llvm::MapVector<const Decl *, MappedVarData>;
- /// Set of the parameters passed by value escaping OpenMP context.
- using EscapedParamsTy = llvm::SmallPtrSet<const Decl *, 4>;
- struct FunctionData {
- DeclToAddrMapTy LocalVarData;
- llvm::Optional<DeclToAddrMapTy> SecondaryLocalVarData = llvm::None;
- EscapedParamsTy EscapedParameters;
- llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
- llvm::SmallVector<llvm::Value *, 4> EscapedVariableLengthDeclsAddrs;
- const RecordDecl *GlobalRecord = nullptr;
- llvm::Optional<const RecordDecl *> SecondaryGlobalRecord = llvm::None;
- llvm::Value *GlobalRecordAddr = nullptr;
- llvm::Value *IsInSPMDModeFlag = nullptr;
- std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
- };
- /// Maps the function to the list of the globalized variables with their
- /// addresses.
- llvm::SmallDenseMap<llvm::Function *, FunctionData> FunctionGlobalizedDecls;
- /// List of records for the globalized variables in target/teams/distribute
- /// contexts. Inner records are going to be joined into the single record,
- /// while those resulting records are going to be joined into the single
- /// union. This resulting union (one per CU) is the entry point for the static
- /// memory management runtime functions.
- struct GlobalPtrSizeRecsTy {
- llvm::GlobalVariable *UseSharedMemory = nullptr;
- llvm::GlobalVariable *RecSize = nullptr;
- llvm::GlobalVariable *Buffer = nullptr;
- SourceLocation Loc;
- llvm::SmallVector<const RecordDecl *, 2> Records;
- unsigned RegionCounter = 0;
- };
- llvm::SmallVector<GlobalPtrSizeRecsTy, 8> GlobalizedRecords;
- llvm::GlobalVariable *KernelTeamsReductionPtr = nullptr;
- /// List of the records with the list of fields for the reductions across the
- /// teams. Used to build the intermediate buffer for the fast teams
- /// reductions.
- /// All the records are gathered into a union `union.type` is created.
- llvm::SmallVector<const RecordDecl *, 4> TeamsReductions;
- /// Shared pointer for the global memory in the global memory buffer used for
- /// the given kernel.
- llvm::GlobalVariable *KernelStaticGlobalized = nullptr;
- /// Pair of the Non-SPMD team and all reductions variables in this team
- /// region.
- std::pair<const Decl *, llvm::SmallVector<const ValueDecl *, 4>>
- TeamAndReductions;
+ /// Get the maximum number of threads in a block of the GPU.
+ llvm::Value *getGPUNumThreads(CodeGenFunction &CGF) override;
};
} // CodeGen namespace.
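
The class above shrinks to three target queries because the rest of the NVPTX
logic moved into the new CGOpenMPRuntimeGPU base class. As a rough sketch of
what the remaining overrides amount to (assuming the llvm.nvvm.read.ptx.sreg.*
special-register intrinsics; the real bodies live in CGOpenMPRuntimeNVPTX.cpp
and are not part of this hunk):

  llvm::Value *CGOpenMPRuntimeNVPTX::getGPUWarpSize(CodeGenFunction &CGF) {
    // The warp size is exposed as a PTX special register.
    return CGF.Builder.CreateCall(llvm::Intrinsic::getDeclaration(
        &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize));
  }

  llvm::Value *CGOpenMPRuntimeNVPTX::getGPUThreadID(CodeGenFunction &CGF) {
    // Thread id within the block: %tid.x.
    return CGF.Builder.CreateCall(llvm::Intrinsic::getDeclaration(
        &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x));
  }

  llvm::Value *CGOpenMPRuntimeNVPTX::getGPUNumThreads(CodeGenFunction &CGF) {
    // Number of threads per block: %ntid.x.
    return CGF.Builder.CreateCall(llvm::Intrinsic::getDeclaration(
        &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x));
  }
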
diff --git a/clang/lib/CodeGen/CGRecordLayout.h b/clang/lib/CodeGen/CGRecordLayout.h
index 730ee4c438e7..e6665b72bcba 100644
--- a/clang/lib/CodeGen/CGRecordLayout.h
+++ b/clang/lib/CodeGen/CGRecordLayout.h
@@ -46,7 +46,7 @@ namespace CodeGen {
/// };
///
/// This will end up as the following LLVM type. The first array is the
-/// bitfield, and the second is the padding out to a 4-byte alignmnet.
+/// bitfield, and the second is the padding out to a 4-byte alignment.
///
/// %t = type { i8, i8, i8, i8, i8, [3 x i8] }
///
@@ -80,8 +80,21 @@ struct CGBitFieldInfo {
/// The offset of the bitfield storage from the start of the struct.
CharUnits StorageOffset;
+ /// The offset within a contiguous run of bitfields that are represented as a
+ /// single "field" within the LLVM struct type, taking into account the AAPCS
+ /// rules for volatile bitfields. This offset is in bits.
+ unsigned VolatileOffset : 16;
+
+ /// The storage size in bits which should be used when accessing this
+ /// bitfield.
+ unsigned VolatileStorageSize;
+
+ /// The offset of the bitfield storage from the start of the struct.
+ CharUnits VolatileStorageOffset;
+
CGBitFieldInfo()
- : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset() {}
+ : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset(),
+ VolatileOffset(), VolatileStorageSize(), VolatileStorageOffset() {}
CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
unsigned StorageSize, CharUnits StorageOffset)
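
To make the three new Volatile* fields concrete, a source-level example of the
AAPCS rule they support (assuming an AAPCS target such as armv7 and the
default -faapcs-bitfield-width):

  struct S {
    volatile int a : 3; // declared type is int, so AAPCS requires every
    volatile int b : 5; //   access to read/write one whole 32-bit container
  };

  struct T {
    volatile int a : 3; // the 32-bit container would overlap 'c', which the
    char c;             //   AAPCS does not cover; clang keeps its default
  };                    //   narrow access sequence for 'a'
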
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 4e5d1d3f16f6..cf8313f92587 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -109,6 +109,14 @@ struct CGRecordLowering {
D->isMsStruct(Context);
}
+ /// Helper function to check if we are targeting AAPCS.
+ bool isAAPCS() const {
+ return Context.getTargetInfo().getABI().startswith("aapcs");
+ }
+
+ /// Helper function to check if the target machine is BigEndian.
+ bool isBE() const { return Context.getTargetInfo().isBigEndian(); }
+
/// The Itanium base layout rule allows virtual bases to overlap
/// other bases, which complicates layout in specific ways.
///
@@ -119,15 +127,20 @@ struct CGRecordLowering {
/// Wraps llvm::Type::getIntNTy with some implicit arguments.
llvm::Type *getIntNType(uint64_t NumBits) {
+ unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
+ return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
+ }
+ /// Get the LLVM type sized as one character unit.
+ llvm::Type *getCharType() {
return llvm::Type::getIntNTy(Types.getLLVMContext(),
- (unsigned)llvm::alignTo(NumBits, 8));
+ Context.getCharWidth());
}
- /// Gets an llvm type of size NumBytes and alignment 1.
- llvm::Type *getByteArrayType(CharUnits NumBytes) {
- assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
- llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
- return NumBytes == CharUnits::One() ? Type :
- (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
+ /// Gets an llvm type of size NumChars and alignment 1.
+ llvm::Type *getByteArrayType(CharUnits NumChars) {
+ assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
+ llvm::Type *Type = getCharType();
+ return NumChars == CharUnits::One() ? Type :
+ (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
}
/// Gets the storage type for a field decl and handles storage
/// for itanium bitfields that are smaller than their declared type.
@@ -172,7 +185,8 @@ struct CGRecordLowering {
void lowerUnion();
void accumulateFields();
void accumulateBitFields(RecordDecl::field_iterator Field,
- RecordDecl::field_iterator FieldEnd);
+ RecordDecl::field_iterator FieldEnd);
+ void computeVolatileBitfields();
void accumulateBases();
void accumulateVPtrs();
void accumulateVBases();
@@ -237,6 +251,10 @@ void CGRecordLowering::setBitFieldInfo(
// least-significant-bit.
if (DataLayout.isBigEndian())
Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
+
+ Info.VolatileStorageSize = 0;
+ Info.VolatileOffset = 0;
+ Info.VolatileStorageOffset = CharUnits::Zero();
}
void CGRecordLowering::lower(bool NVBaseType) {
@@ -261,15 +279,21 @@ void CGRecordLowering::lower(bool NVBaseType) {
// 8) Format the complete list of members in a way that can be consumed by
// CodeGenTypes::ComputeRecordLayout.
CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
- if (D->isUnion())
- return lowerUnion();
+ if (D->isUnion()) {
+ lowerUnion();
+ computeVolatileBitfields();
+ return;
+ }
accumulateFields();
// RD implies C++.
if (RD) {
accumulateVPtrs();
accumulateBases();
- if (Members.empty())
- return appendPaddingBytes(Size);
+ if (Members.empty()) {
+ appendPaddingBytes(Size);
+ computeVolatileBitfields();
+ return;
+ }
if (!NVBaseType)
accumulateVBases();
}
@@ -281,6 +305,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
Members.pop_back();
calculateZeroInit();
fillOutputFields();
+ computeVolatileBitfields();
}
void CGRecordLowering::lowerUnion() {
@@ -418,9 +443,9 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
!DataLayout.fitsInLegalInteger(OffsetInRecord))
return false;
- // Make sure StartBitOffset is natually aligned if it is treated as an
+ // Make sure StartBitOffset is naturally aligned if it is treated as an
// IType integer.
- if (StartBitOffset %
+ if (StartBitOffset %
Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
0)
return false;
@@ -503,6 +528,123 @@ void CGRecordLowering::accumulateBases() {
}
}
+/// The AAPCS defines that, when possible, bit-fields should
+/// be accessed using containers of the declared type width:
+/// When a volatile bit-field is read, and its container does not overlap with
+/// any non-bit-field member or any zero length bit-field member, its container
+/// must be read exactly once using the access width appropriate to the type of
+/// the container. When a volatile bit-field is written, and its container does
+/// not overlap with any non-bit-field member or any zero-length bit-field
+/// member, its container must be read exactly once and written exactly once
+/// using the access width appropriate to the type of the container. The two
+/// accesses are not atomic.
+///
+/// Enforcing the width restriction can be disabled using
+/// -fno-aapcs-bitfield-width.
+void CGRecordLowering::computeVolatileBitfields() {
+ if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
+ return;
+
+ for (auto &I : BitFields) {
+ const FieldDecl *Field = I.first;
+ CGBitFieldInfo &Info = I.second;
+ llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
+    // If the record alignment is less than the type width, we can't enforce
+    // an aligned load; bail out.
+ if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
+ ResLTy->getPrimitiveSizeInBits())
+ continue;
+ // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
+ // for big-endian targets, but it assumes a container of width
+ // Info.StorageSize. Since AAPCS uses a different container size (width
+ // of the type), we first undo that calculation here and redo it once
+ // the bit-field offset within the new container is calculated.
+ const unsigned OldOffset =
+ isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
+ // Offset to the bit-field from the beginning of the struct.
+ const unsigned AbsoluteOffset =
+ Context.toBits(Info.StorageOffset) + OldOffset;
+
+ // Container size is the width of the bit-field type.
+ const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
+ // Nothing to do if the access uses the desired
+ // container width and is naturally aligned.
+ if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
+ continue;
+
+ // Offset within the container.
+ unsigned Offset = AbsoluteOffset & (StorageSize - 1);
+    // Bail out if an aligned load of the container cannot cover the entire
+    // bit-field. This can happen, for example, if the bit-field is part of a
+    // packed struct. The AAPCS does not define access rules for such cases,
+    // so we let clang follow its own rules.
+ if (Offset + Info.Size > StorageSize)
+ continue;
+
+ // Re-adjust offsets for big-endian targets.
+ if (isBE())
+ Offset = StorageSize - (Offset + Info.Size);
+
+ const CharUnits StorageOffset =
+ Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
+ const CharUnits End = StorageOffset +
+ Context.toCharUnitsFromBits(StorageSize) -
+ CharUnits::One();
+
+ const ASTRecordLayout &Layout =
+ Context.getASTRecordLayout(Field->getParent());
+    // If the access would touch memory outside the record, bail out.
+ const CharUnits RecordSize = Layout.getSize();
+ if (End >= RecordSize)
+ continue;
+
+    // Bail out if performing this load would access non-bit-field members.
+ bool Conflict = false;
+ for (const auto *F : D->fields()) {
+      // Overlaps with other (non-zero-length) bit-fields are allowed.
+ if (F->isBitField() && !F->isZeroLengthBitField(Context))
+ continue;
+
+ const CharUnits FOffset = Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(F->getFieldIndex()));
+
+      // As C11 defines it, a zero-sized bit-field acts as a barrier, so the
+      // fields before and after it must be free of race conditions.
+      // The AAPCS acknowledges this and imposes no restrictions when the
+      // natural container overlaps a zero-length bit-field.
+ if (F->isZeroLengthBitField(Context)) {
+ if (End > FOffset && StorageOffset < FOffset) {
+ Conflict = true;
+ break;
+ }
+ }
+
+ const CharUnits FEnd =
+ FOffset +
+ Context.toCharUnitsFromBits(
+ Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
+ CharUnits::One();
+ // If no overlap, continue.
+ if (End < FOffset || FEnd < StorageOffset)
+ continue;
+
+ // The desired load overlaps a non-bit-field member, bail out.
+ Conflict = true;
+ break;
+ }
+
+ if (Conflict)
+ continue;
+ // Write the new bit-field access parameters.
+    // As the storage offset is now defined as the number of elements from the
+    // start of the structure, divide the byte offset by the container size.
+ Info.VolatileStorageOffset =
+ StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
+ Info.VolatileStorageSize = StorageSize;
+ Info.VolatileOffset = Offset;
+ }
+}
+
void CGRecordLowering::accumulateVPtrs() {
if (Layout.hasOwnVFPtr())
Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
@@ -605,7 +747,7 @@ void CGRecordLowering::clipTailPadding() {
assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
"should not have reused this field's tail padding");
Prior->Data = getByteArrayType(
- Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).first);
+ Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
}
}
if (Member->Data)
@@ -848,8 +990,10 @@ CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
assert(Info.StorageSize <= SL->getSizeInBits() &&
"Union not large enough for bitfield storage");
} else {
- assert(Info.StorageSize ==
- getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
+ assert((Info.StorageSize ==
+ getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
+ Info.VolatileStorageSize ==
+ getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
"Storage size does not match the element type size");
}
assert(Info.Size > 0 && "Empty bitfield!");
@@ -897,11 +1041,12 @@ LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
void CGBitFieldInfo::print(raw_ostream &OS) const {
OS << "<CGBitFieldInfo"
- << " Offset:" << Offset
- << " Size:" << Size
- << " IsSigned:" << IsSigned
+ << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
<< " StorageSize:" << StorageSize
- << " StorageOffset:" << StorageOffset.getQuantity() << ">";
+ << " StorageOffset:" << StorageOffset.getQuantity()
+ << " VolatileOffset:" << VolatileOffset
+ << " VolatileStorageSize:" << VolatileStorageSize
+ << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}
LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
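
A worked instance of the container arithmetic in computeVolatileBitfields()
above, with hypothetical numbers (a 5-bit field at absolute bit offset 35,
declared with a 32-bit type):

  constexpr unsigned StorageSize    = 32; // container width in bits
  constexpr unsigned AbsoluteOffset = 35; // bit offset from the record start
  constexpr unsigned Size           = 5;  // bit-field width
  // Offset within the naturally aligned container: 35 & 31 == 3.
  constexpr unsigned Offset = AbsoluteOffset & (StorageSize - 1);
  // Container start: 35 & ~31 == bit 32, i.e. byte 4 of the record.
  constexpr unsigned StorageOffsetBits = AbsoluteOffset & ~(StorageSize - 1);
  // One aligned 32-bit load covers the whole field, so the rule applies.
  static_assert(Offset + Size <= StorageSize, "field fits in one container");
  // On a big-endian target the offset is then flipped: 32 - (3 + 5) == 24.
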
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 672909849bb7..a1a72a9f668d 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -18,9 +18,11 @@
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
@@ -50,7 +52,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
PGO.setCurrentStmt(S);
// These statements have their own debug info handling.
- if (EmitSimpleStmt(S))
+ if (EmitSimpleStmt(S, Attrs))
return;
// Check if we are generating unreachable code.
@@ -370,23 +372,44 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
}
}
-bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
+ ArrayRef<const Attr *> Attrs) {
switch (S->getStmtClass()) {
- default: return false;
- case Stmt::NullStmtClass: break;
- case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
- case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
- case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ default:
+ return false;
+ case Stmt::NullStmtClass:
+ break;
+ case Stmt::CompoundStmtClass:
+ EmitCompoundStmt(cast<CompoundStmt>(*S));
+ break;
+ case Stmt::DeclStmtClass:
+ EmitDeclStmt(cast<DeclStmt>(*S));
+ break;
+ case Stmt::LabelStmtClass:
+ EmitLabelStmt(cast<LabelStmt>(*S));
+ break;
case Stmt::AttributedStmtClass:
- EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
- case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
- case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
- case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
- case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
- case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
- case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
+ EmitAttributedStmt(cast<AttributedStmt>(*S));
+ break;
+ case Stmt::GotoStmtClass:
+ EmitGotoStmt(cast<GotoStmt>(*S));
+ break;
+ case Stmt::BreakStmtClass:
+ EmitBreakStmt(cast<BreakStmt>(*S));
+ break;
+ case Stmt::ContinueStmtClass:
+ EmitContinueStmt(cast<ContinueStmt>(*S));
+ break;
+ case Stmt::DefaultStmtClass:
+ EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
+ break;
+ case Stmt::CaseStmtClass:
+ EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
+ break;
+ case Stmt::SEHLeaveStmtClass:
+ EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
+ break;
}
-
return true;
}
@@ -695,8 +718,14 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
if (S.getElse())
ElseBlock = createBasicBlock("if.else");
- EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
- getProfileCount(S.getThen()));
+  // Prefer the PGO-based weights over the likelihood attribute.
+  // When the build isn't optimized, the metadata isn't used, so don't
+  // generate it.
+ Stmt::Likelihood LH = Stmt::LH_None;
+ uint64_t Count = getProfileCount(S.getThen());
+ if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
+ LH = Stmt::getLikelihood(S.getThen(), S.getElse());
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);
// Emit the 'then' code.
EmitBlock(ThenBlock);
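
In source terms, the change means that with optimization enabled and no
instrumentation counts, the C++20 likelihood attributes now drive the branch
weights of an if statement. A sketch (handle_rare() and fast_path() are
hypothetical helpers):

  void handle_rare();
  void fast_path();
  void dispatch(bool rare) {
    if (rare) [[unlikely]]
      handle_rare(); // receives the low branch weight
    else
      fast_path();   // receives the high branch weight
  }
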
@@ -736,11 +765,6 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
EmitBlock(LoopHeader.getBlock());
- const SourceRange &R = S.getSourceRange();
- LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
- WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()));
-
// Create an exit block for when the condition fails, which will
// also become the break target.
JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
@@ -768,9 +792,19 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
// while(1) is common, avoid extra exit blocks. Be sure
// to correctly handle break/continue though.
bool EmitBoolCondBranch = true;
- if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
- if (C->isOne())
+ bool LoopMustProgress = false;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) {
+ if (C->isOne()) {
EmitBoolCondBranch = false;
+ FnIsMustProgress = false;
+ }
+ } else if (LanguageRequiresProgress())
+ LoopMustProgress = true;
+
+ const SourceRange &R = S.getSourceRange();
+ LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
+ WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);
// As long as the condition is true, go to the loop body.
llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
@@ -778,14 +812,22 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (ConditionScope.requiresCleanups())
ExitBlock = createBasicBlock("while.exit");
- Builder.CreateCondBr(
- BoolCondVal, LoopBody, ExitBlock,
- createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
+ llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
+ S.getCond(), getProfileCount(S.getBody()), S.getBody());
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
}
+ } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
+ CGM.getDiags().Report(A->getLocation(),
+ diag::warn_attribute_has_no_effect_on_infinite_loop)
+ << A << A->getRange();
+ CGM.getDiags().Report(
+ S.getWhileLoc(),
+ diag::note_attribute_has_no_effect_on_infinite_loop_here)
+ << SourceRange(S.getWhileLoc(), S.getRParenLoc());
}
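
The FnIsMustProgress/LoopMustProgress bookkeeping reflects the forward-progress
rules of C++11 and later (see LanguageRequiresProgress()): a loop with a
non-constant controlling expression may be assumed to make progress, while
'while (1)' is exempt and strips the guarantee from the enclosing function.
A sketch (cond() is a hypothetical function):

  bool cond();
  void spin() {
    while (1) { }      // constant true: exempt, and the enclosing function
  }                    //   loses its must-progress assumption
  void poll() {
    while (cond()) { } // non-constant condition: the loop is flagged as
  }                    //   must-progress for the optimizer
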
// Emit the loop body. We have to emit this in a cleanup scope
@@ -838,11 +880,6 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
EmitBlock(LoopCond.getBlock());
- const SourceRange &R = S.getSourceRange();
- LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
- SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()));
-
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
@@ -856,9 +893,19 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// "do {} while (0)" is common in macros, avoid extra blocks. Be sure
// to correctly handle break/continue though.
bool EmitBoolCondBranch = true;
- if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ bool LoopMustProgress = false;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) {
if (C->isZero())
EmitBoolCondBranch = false;
+ else if (C->isOne())
+ FnIsMustProgress = false;
+ } else if (LanguageRequiresProgress())
+ LoopMustProgress = true;
+
+ const SourceRange &R = S.getSourceRange();
+ LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
+ SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch) {
@@ -896,10 +943,20 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
llvm::BasicBlock *CondBlock = Continue.getBlock();
EmitBlock(CondBlock);
+ bool LoopMustProgress = false;
+ Expr::EvalResult Result;
+ if (LanguageRequiresProgress()) {
+ if (!S.getCond()) {
+ FnIsMustProgress = false;
+ } else if (!S.getCond()->EvaluateAsInt(Result, getContext())) {
+ LoopMustProgress = true;
+ }
+ }
+
const SourceRange &R = S.getSourceRange();
LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()));
+ SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);
// If the for loop doesn't have an increment we can just use the
// condition as the continue block. Otherwise we'll need to create
@@ -933,9 +990,14 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
- Builder.CreateCondBr(
- BoolCondVal, ForBody, ExitBlock,
- createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
+ llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
+ S.getCond(), getProfileCount(S.getBody()), S.getBody());
+
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ FnIsMustProgress = false;
+
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
@@ -1014,9 +1076,9 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
// The body is executed if the expression, contextually converted
// to bool, is true.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
- Builder.CreateCondBr(
- BoolCondVal, ForBody, ExitBlock,
- createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
+ llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
+ S.getCond(), getProfileCount(S.getBody()), S.getBody());
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
@@ -1215,7 +1277,8 @@ void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
-void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
+ ArrayRef<const Attr *> Attrs) {
assert(S.getRHS() && "Expected RHS value in CaseStmt");
llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
@@ -1232,6 +1295,7 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
return;
+ Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
llvm::APInt Range = RHS - LHS;
// FIXME: parameters such as this should not be hardcoded.
if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
@@ -1246,6 +1310,9 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
for (unsigned I = 0; I != NCases; ++I) {
if (SwitchWeights)
SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
+ else if (SwitchLikelihood)
+ SwitchLikelihood->push_back(LH);
+
if (Rem)
Rem--;
SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
@@ -1283,7 +1350,9 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
// need to update the weight for the default, ie, the first case, to include
// this case.
(*SwitchWeights)[0] += ThisCount;
- }
+ } else if (SwitchLikelihood)
+ Weights = createBranchWeights(LH);
+
Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
// Restore the appropriate insertion point.
@@ -1293,7 +1362,8 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
Builder.ClearInsertionPoint();
}
-void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
+ ArrayRef<const Attr *> Attrs) {
// If there is no enclosing switch instance that we're aware of, then this
// case statement and its block can be elided. This situation only happens
// when we've constant-folded the switch, are emitting the constant case,
@@ -1306,12 +1376,14 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// Handle case ranges.
if (S.getRHS()) {
- EmitCaseStmtRange(S);
+ EmitCaseStmtRange(S, Attrs);
return;
}
llvm::ConstantInt *CaseVal =
Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
+ if (SwitchLikelihood)
+ SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
// If the body of the case is just a 'break', try to not emit an empty block.
// If we're profiling or we're not optimizing, leave the block in for better
@@ -1352,6 +1424,10 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// that falls through to the next case which is IR intensive. It also causes
// deep recursion which can run into stack depth limitations. Handle
// sequential non-range case statements specially.
+ //
+  // TODO: When the next case has a likelihood attribute, the code falls back
+  // to the recursive algorithm. Maybe improve this case if it becomes common
+  // practice to use many attributes.
const CaseStmt *CurCase = &S;
const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
@@ -1365,8 +1441,12 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
SwitchWeights->push_back(getProfileCount(NextCase));
if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
CaseDest = createBasicBlock("sw.bb");
- EmitBlockWithFallThrough(CaseDest, &S);
+ EmitBlockWithFallThrough(CaseDest, CurCase);
}
+    // Since this loop is only executed when the CaseStmt has no attributes,
+    // use a hard-coded value.
+ if (SwitchLikelihood)
+ SwitchLikelihood->push_back(Stmt::LH_None);
SwitchInsn->addCase(CaseVal, CaseDest);
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
@@ -1376,7 +1456,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
EmitStmt(CurCase->getSubStmt());
}
-void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
+ ArrayRef<const Attr *> Attrs) {
// If there is no enclosing switch instance that we're aware of, then this
// default statement can be elided. This situation only happens when we've
// constant-folded the switch.
@@ -1389,6 +1470,9 @@ void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
assert(DefaultBlock->empty() &&
"EmitDefaultStmt: Default block already defined?");
+ if (SwitchLikelihood)
+ SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
+
EmitBlockWithFallThrough(DefaultBlock, &S);
EmitStmt(S.getSubStmt());
@@ -1626,10 +1710,67 @@ static bool FindCaseStatementsForValue(const SwitchStmt &S,
FoundCase;
}
+static Optional<SmallVector<uint64_t, 16>>
+getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
+ // Are there enough branches to weight them?
+ if (Likelihoods.size() <= 1)
+ return None;
+
+ uint64_t NumUnlikely = 0;
+ uint64_t NumNone = 0;
+ uint64_t NumLikely = 0;
+ for (const auto LH : Likelihoods) {
+ switch (LH) {
+ case Stmt::LH_Unlikely:
+ ++NumUnlikely;
+ break;
+ case Stmt::LH_None:
+ ++NumNone;
+ break;
+ case Stmt::LH_Likely:
+ ++NumLikely;
+ break;
+ }
+ }
+
+  // Was a likelihood attribute used at all?
+ if (NumUnlikely == 0 && NumLikely == 0)
+ return None;
+
+ // When multiple cases share the same code they can be combined during
+ // optimization. In that case the weights of the branch will be the sum of
+ // the individual weights. Make sure the combined sum of all neutral cases
+ // doesn't exceed the value of a single likely attribute.
+ // The additions both avoid divisions by 0 and make sure the weights of None
+ // don't exceed the weight of Likely.
+ const uint64_t Likely = INT32_MAX / (NumLikely + 2);
+ const uint64_t None = Likely / (NumNone + 1);
+ const uint64_t Unlikely = 0;
+
+ SmallVector<uint64_t, 16> Result;
+ Result.reserve(Likelihoods.size());
+ for (const auto LH : Likelihoods) {
+ switch (LH) {
+ case Stmt::LH_Unlikely:
+ Result.push_back(Unlikely);
+ break;
+ case Stmt::LH_None:
+ Result.push_back(None);
+ break;
+ case Stmt::LH_Likely:
+ Result.push_back(Likely);
+ break;
+ }
+ }
+
+ return Result;
+}
+
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// Handle nested switch statements.
llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
+ SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
// See if we can constant fold the condition of the switch and therefore only
@@ -1704,7 +1845,12 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// The default needs to be first. We store the edge count, so we already
// know the right weight.
SwitchWeights->push_back(DefaultCount);
+ } else if (CGM.getCodeGenOpts().OptimizationLevel) {
+ SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
+ // Initialize the default case.
+ SwitchLikelihood->push_back(Stmt::LH_None);
}
+
CaseRangeBlock = DefaultBlock;
// Clear the insertion point to indicate we are in unreachable code.
@@ -1768,9 +1914,21 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
createProfileWeights(*SwitchWeights));
delete SwitchWeights;
+ } else if (SwitchLikelihood) {
+ assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
+ "switch likelihoods do not match switch cases");
+ Optional<SmallVector<uint64_t, 16>> LHW =
+ getLikelihoodWeights(*SwitchLikelihood);
+ if (LHW) {
+ llvm::MDBuilder MDHelper(CGM.getLLVMContext());
+ SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
+ createProfileWeights(*LHW));
+ }
+ delete SwitchLikelihood;
}
SwitchInsn = SavedSwitchInsn;
SwitchWeights = SavedSwitchWeights;
+ SwitchLikelihood = SavedSwitchLikelihood;
CaseRangeBlock = SavedCRBlock;
}
@@ -1830,7 +1988,8 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
const TargetInfo &Target, CodeGenModule &CGM,
- const AsmStmt &Stmt, const bool EarlyClobber) {
+ const AsmStmt &Stmt, const bool EarlyClobber,
+ std::string *GCCReg = nullptr) {
const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
if (!AsmDeclRef)
return Constraint;
@@ -1855,6 +2014,8 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
}
// Canonicalize the register here before returning it.
Register = Target.getNormalizedGCCRegisterName(Register);
+ if (GCCReg != nullptr)
+ *GCCReg = Register.str();
return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}
@@ -1954,12 +2115,16 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
}
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
- bool ReadOnly, bool ReadNone, const AsmStmt &S,
+ bool ReadOnly, bool ReadNone, bool NoMerge,
+ const AsmStmt &S,
const std::vector<llvm::Type *> &ResultRegTypes,
CodeGenFunction &CGF,
std::vector<llvm::Value *> &RegResults) {
Result.addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind);
+ if (NoMerge)
+ Result.addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoMerge);
// Attach readnone and readonly attributes.
if (!HasSideEffect) {
if (ReadNone)
@@ -2049,6 +2214,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Keep track of out constraints for tied input operand.
std::vector<std::string> OutputConstraints;
+ // Keep track of defined physregs.
+ llvm::SmallSet<std::string, 8> PhysRegOutputs;
+
// An inline asm can be marked readonly if it meets the following conditions:
// - it doesn't have any sideeffects
// - it doesn't clobber memory
@@ -2068,9 +2236,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+ std::string GCCReg;
OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
getTarget(), CGM, S,
- Info.earlyClobber());
+ Info.earlyClobber(),
+ &GCCReg);
+    // Give an error on multiple outputs to the same physreg.
+ if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
+ CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
+
OutputConstraints.push_back(OutputConstraint);
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
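
In source terms, binding two asm outputs to the same hard register is now
diagnosed instead of being silently miscompiled. A sketch for an x86 target
(the normalized register name in the message is target-dependent):

  void twice() {
    register int a asm("eax");
    register int b asm("eax");
    // error: multiple outputs to hard register: eax
    asm("movl $1, %0\n\tmovl $2, %1" : "=r"(a), "=r"(b));
  }
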
@@ -2132,8 +2306,21 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinSize());
} else {
- ArgTypes.push_back(Dest.getAddress(*this).getType());
- Args.push_back(Dest.getPointer(*this));
+ llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
+ llvm::Value *DestPtr = Dest.getPointer(*this);
+ // Matrix types in memory are represented by arrays, but accessed through
+ // vector pointers, with the alignment specified on the access operation.
+ // For inline assembly, update pointer arguments to use vector pointers.
+      // Otherwise there will be a mismatch if the matrix is also an
+      // input argument, which is represented as a vector.
+ if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
+ DestAddrTy = llvm::PointerType::get(
+ ConvertType(OutExpr->getType()),
+ cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
+ DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
+ }
+ ArgTypes.push_back(DestAddrTy);
+ Args.push_back(DestPtr);
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2157,7 +2344,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
LargestVectorWidth =
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinSize());
- if (Info.allowsRegister())
+ // Only tie earlyclobber physregs.
+ if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
InOutConstraints += llvm::utostr(i);
else
InOutConstraints += OutputConstraint;
@@ -2334,12 +2522,14 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
EmitBlock(Fallthrough);
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
- ReadNone, S, ResultRegTypes, *this, RegResults);
+ ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
+ *this, RegResults);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
- ReadNone, S, ResultRegTypes, *this, RegResults);
+ ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
+ *this, RegResults);
}
assert(RegResults.size() == ResultRegTypes.size());
@@ -2412,8 +2602,7 @@ LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
I != E; ++I, ++CurField) {
LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
if (CurField->hasCapturedVLAType()) {
- auto VAT = CurField->getCapturedVLAType();
- EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
+ EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
} else {
EmitInitializerForField(*CurField, LV, *I);
}
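
A worked example of the weights getLikelihoodWeights() produces (C++20
attributes; here NumLikely = 1, NumNone = 2 counting the default, and
NumUnlikely = 1):

  int classify(int v) {
    switch (v) {
    [[likely]] case 0:   // weight INT32_MAX / (1 + 2) == 715827882
      return 1;
    case 1:              // LH_None: weight 715827882 / (2 + 1) == 238609294
      return 2;
    [[unlikely]] case 2: // weight 0
      return 3;
    default:             // LH_None; stored first in the likelihood vector
      return 0;
    }
  }
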
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index cfd5eda8cc80..5e8d98cfe5ef 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
@@ -1562,6 +1563,17 @@ static void emitCommonOMPParallelDirective(
CapturedVars, IfCond);
}
+static bool isAllocatableDecl(const VarDecl *VD) {
+ const VarDecl *CVD = VD->getCanonicalDecl();
+ if (!CVD->hasAttr<OMPAllocateDeclAttr>())
+ return false;
+ const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
+ // Use the default allocation.
+ return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
+ AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
+ !AA->getAllocator());
+}
+
static void emitEmptyBoundParameters(CodeGenFunction &,
const OMPExecutableDirective &,
llvm::SmallVectorImpl<llvm::Value *> &) {}
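
What isAllocatableDecl() distinguishes, in source terms (a sketch assuming
<omp.h> and the OpenMP 5.0 predefined allocators):

  #include <omp.h>

  void f() {
    int a; // no allocate directive: not allocatable
    int b;
  #pragma omp allocate(b) // implicit default allocator: not allocatable
    int c;
  #pragma omp allocate(c) allocator(omp_low_lat_mem_alloc) // allocatable
  }
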
@@ -1574,12 +1586,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
if (!VD)
return Address::invalid();
const VarDecl *CVD = VD->getCanonicalDecl();
- if (!CVD->hasAttr<OMPAllocateDeclAttr>())
- return Address::invalid();
- const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
- // Use the default allocation.
- if (AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
- !AA->getAllocator())
+ if (!isAllocatableDecl(CVD))
return Address::invalid();
llvm::Value *Size;
CharUnits Align = CGM.getContext().getDeclAlign(CVD);
@@ -1595,6 +1602,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
Size = CGM.getSize(Sz.alignTo(Align));
}
+ const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
assert(AA->getAllocator() &&
"Expected allocator expression for non-default allocator.");
llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
@@ -1606,11 +1614,11 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
CGM.VoidPtrTy);
- llvm::Value *Addr = OMPBuilder.CreateOMPAlloc(
+ llvm::Value *Addr = OMPBuilder.createOMPAlloc(
CGF.Builder, Size, Allocator,
getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
llvm::CallInst *FreeCI =
- OMPBuilder.CreateOMPFree(CGF.Builder, Addr, Allocator);
+ OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -1638,7 +1646,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
llvm::CallInst *ThreadPrivateCacheCall =
- OMPBuilder.CreateCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
+ OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
}
@@ -1685,7 +1693,7 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
//
// TODO: This defaults to shared right now.
auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
- llvm::Value &Val, llvm::Value *&ReplVal) {
+ llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
// The next line is appropriate only for variables (Val) with the
// data-sharing attribute "shared".
ReplVal = &Val;
@@ -1707,9 +1715,11 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
- Builder.restoreIP(OMPBuilder.CreateParallel(Builder, BodyGenCB, PrivCB,
- FiniCB, IfCond, NumThreads,
- ProcBind, S.hasCancel()));
+ llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
+ AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
+ Builder.restoreIP(
+ OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
+ IfCond, NumThreads, ProcBind, S.hasCancel()));
return;
}
@@ -2979,7 +2989,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
((ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) &&
!(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
- ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
+ ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
@@ -3552,12 +3562,9 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
}
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
- };
- OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
- S.hasCancel());
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitStopPoint(&S);
+ EmitStmt(S.getAssociatedStmt());
}
void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
@@ -3608,7 +3615,7 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
+ CGF.EmitStmt(S.getRawStmt());
};
CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
}
@@ -3618,8 +3625,7 @@ void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
- const CapturedStmt *CS = S.getInnermostCapturedStmt();
- const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt();
+ const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt();
auto FiniCB = [this](InsertPointTy IP) {
OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
@@ -3633,13 +3639,14 @@ void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
CodeGenIP, FiniBB);
};
- CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
- Builder.restoreIP(OMPBuilder.CreateMaster(Builder, BodyGenCB, FiniCB));
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitStopPoint(&S);
+ Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB));
return;
}
- OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitStopPoint(&S);
emitMaster(*this, S);
}
@@ -3648,8 +3655,7 @@ void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
- const CapturedStmt *CS = S.getInnermostCapturedStmt();
- const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt();
+ const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt();
const Expr *Hint = nullptr;
if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
Hint = HintClause->getHint();
@@ -3674,9 +3680,9 @@ void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
CodeGenIP, FiniBB);
};
- CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
- Builder.restoreIP(OMPBuilder.CreateCritical(
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitStopPoint(&S);
+ Builder.restoreIP(OMPBuilder.createCritical(
Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
HintInst));
@@ -3685,12 +3691,13 @@ void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
+ CGF.EmitStmt(S.getAssociatedStmt());
};
const Expr *Hint = nullptr;
if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
Hint = HintClause->getHint();
- OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitStopPoint(&S);
CGM.getOpenMPRuntime().emitCriticalRegion(*this,
S.getDirectiveName().getAsString(),
CodeGen, S.getBeginLoc(), Hint);
@@ -3785,6 +3792,42 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective(
checkForLastprivateConditionalUpdate(*this, S);
}
+namespace {
+/// Get the list of variables declared in the context of the untied tasks.
+class CheckVarsEscapingUntiedTaskDeclContext final
+ : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
+ llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
+
+public:
+ explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
+ virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
+ void VisitDeclStmt(const DeclStmt *S) {
+ if (!S)
+ return;
+ // Only local vars need to be privatized; static locals can be processed as-is.
+ for (const Decl *D : S->decls()) {
+ if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
+ if (VD->hasLocalStorage())
+ PrivateDecls.push_back(VD);
+ }
+ }
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
+ void VisitCapturedStmt(const CapturedStmt *) { return; }
+ void VisitLambdaExpr(const LambdaExpr *) { return; }
+ void VisitBlockExpr(const BlockExpr *) { return; }
+ void VisitStmt(const Stmt *S) {
+ if (!S)
+ return;
+ for (const Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ /// Returns the list of local variables to be privatized.
+ ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
+};
+} // anonymous namespace
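The visitor records only `VarDecl`s with local storage and deliberately refuses to descend into nested directives, captured statements, lambdas, or blocks, since those introduce capture contexts of their own. A minimal usage sketch, mirroring how the later hunk in `EmitOMPTaskBasedDirective` drives it (`S` and `Data` are assumed from that function):

```cpp
// Sketch: collect the body-local variables of an untied task that must
// be privatized.
CheckVarsEscapingUntiedTaskDeclContext Checker;
Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
for (const VarDecl *VD : Checker.getPrivateDecls())
  Data.PrivateLocals.push_back(VD); // OMPTaskDataTy::PrivateLocals
```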
+
void CodeGenFunction::EmitOMPTaskBasedDirective(
const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
@@ -3885,14 +3928,23 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
}
+ // Get list of local vars for untied tasks.
+ if (!Data.Tied) {
+ CheckVarsEscapingUntiedTaskDeclContext Checker;
+ Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
+ Data.PrivateLocals.append(Checker.getPrivateDecls().begin(),
+ Checker.getPrivateDecls().end());
+ }
auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
CapturedRegion](CodeGenFunction &CGF,
PrePostActionTy &Action) {
+ llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>>
+ UntiedLocalVars;
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
- !Data.LastprivateVars.empty()) {
+ !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
enum { PrivatesParam = 2, CopyFnParam = 3 };
@@ -3928,6 +3980,17 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
+ for (const VarDecl *VD : Data.PrivateLocals) {
+ QualType Ty = VD->getType().getNonReferenceType();
+ if (VD->getType()->isLValueReferenceType())
+ Ty = CGF.getContext().getPointerType(Ty);
+ if (isAllocatableDecl(VD))
+ Ty = CGF.getContext().getPointerType(Ty);
+ Address PrivatePtr = CGF.CreateMemTemp(
+ CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
+ UntiedLocalVars.try_emplace(VD, PrivatePtr, Address::invalid());
+ CallArgs.push_back(PrivatePtr.getPointer());
+ }
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : LastprivateDstsOrigs) {
@@ -3946,6 +4009,22 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
}
+ // Adjust mapping for internal locals by mapping actual memory instead of
+ // a pointer to this memory.
+ for (auto &Pair : UntiedLocalVars) {
+ if (isAllocatableDecl(Pair.first)) {
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
+ Address Replacement(Ptr, CGF.getPointerAlign());
+ Pair.getSecond().first = Replacement;
+ Ptr = CGF.Builder.CreateLoad(Replacement);
+ Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
+ Pair.getSecond().second = Replacement;
+ } else {
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
+ Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
+ Pair.getSecond().first = Replacement;
+ }
+ }
}
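The loop above rewrites each `UntiedLocalVars` entry from "pointer to the storage" to the storage itself; allocatable declarations carry one extra level of indirection, hence the second load. A hedged sketch of the allocatable case, with `PtrToPtrAddr` and `VD` as stand-ins for the map entry and its declaration:

```cpp
// Illustrative: the task thunk hands back T** for an allocatable local,
// so two loads are needed to reach the object itself.
llvm::Value *P = CGF.Builder.CreateLoad(PtrToPtrAddr);  // yields T*
Address AllocCell(P, CGF.getPointerAlign());            // allocator cell
llvm::Value *Obj = CGF.Builder.CreateLoad(AllocCell);   // object pointer
Address Local(Obj, CGF.getContext().getDeclAlign(VD));  // the local itself
```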
if (Data.Reductions) {
OMPPrivateScope FirstprivateScope(CGF);
@@ -4040,6 +4119,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
(void)InRedScope.Privatize();
+ CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
+ UntiedLocalVars);
Action.Enter(CGF);
BodyGen(CGF);
};
@@ -4075,7 +4156,7 @@ createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
PrivateVD->setInitStyle(VarDecl::CInit);
PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
InitRef, /*BasePath=*/nullptr,
- VK_RValue));
+ VK_RValue, FPOptionsOverride()));
Data.FirstprivateVars.emplace_back(OrigRef);
Data.FirstprivateCopies.emplace_back(PrivateRef);
Data.FirstprivateInits.emplace_back(InitRef);
@@ -4111,17 +4192,18 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
VarDecl *BPVD = nullptr;
VarDecl *PVD = nullptr;
VarDecl *SVD = nullptr;
+ VarDecl *MVD = nullptr;
if (InputInfo.NumberOfTargetItems > 0) {
auto *CD = CapturedDecl::Create(
getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
- QualType BaseAndPointersType = getContext().getConstantArrayType(
+ QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
/*IndexTypeQuals=*/0);
BPVD = createImplicitFirstprivateForType(
- getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
+ getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
PVD = createImplicitFirstprivateForType(
- getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
+ getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
QualType SizesType = getContext().getConstantArrayType(
getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
ArrSize, nullptr, ArrayType::Normal,
@@ -4134,6 +4216,15 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
[&InputInfo]() { return InputInfo.PointersArray; });
TargetScope.addPrivate(SVD,
[&InputInfo]() { return InputInfo.SizesArray; });
+ // If there is no user-defined mapper, the mapper array will be nullptr. In
+ // this case, we don't need to privatize it.
+ if (!dyn_cast_or_null<llvm::ConstantPointerNull>(
+ InputInfo.MappersArray.getPointer())) {
+ MVD = createImplicitFirstprivateForType(
+ getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
+ TargetScope.addPrivate(MVD,
+ [&InputInfo]() { return InputInfo.MappersArray; });
+ }
}
(void)TargetScope.Privatize();
// Build list of dependences.
@@ -4142,7 +4233,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
}
- auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
+ auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
&InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
@@ -4183,6 +4274,10 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
+ // If MVD is nullptr, the mapper array was not privatized and needs no update.
+ if (MVD)
+ InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
+ CGF.GetAddrOfLocalVar(MVD), /*Index=*/0);
}
Action.Enter(CGF);
@@ -4769,7 +4864,7 @@ static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
if (S.hasClausesOfKind<OMPDependClause>()) {
- assert(!S.getAssociatedStmt() &&
+ assert(!S.hasAssociatedStmt() &&
"No associated statement must be in ordered depend construct.");
for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
@@ -5359,17 +5454,11 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
}
}
- const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
-
- auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF,
- PrePostActionTy &) {
- CGF.EmitStopPoint(CS);
- emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
- S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
- S.getBeginLoc());
- };
- OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitStopPoint(S.getAssociatedStmt());
+ emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
+ S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
+ S.getBeginLoc());
}
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
@@ -5887,7 +5976,7 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
IfCondition = EmitScalarExpr(IfCond,
/*IgnoreResultAssign=*/true);
return Builder.restoreIP(
- OMPBuilder.CreateCancel(Builder, IfCondition, S.getCancelRegion()));
+ OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion()));
}
}
@@ -6030,7 +6119,8 @@ void CodeGenFunction::EmitOMPUseDeviceAddrClause(
// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
const OMPTargetDataDirective &S) {
- CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
+ CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true,
+ /*SeparateBeginEndCalls=*/true);
// Create a pre/post action to signal the privatization of the device pointer.
// This action can be replaced by the OpenMP runtime code generation to
@@ -6621,7 +6711,12 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
}
};
- {
+ if (D.getDirectiveKind() == OMPD_atomic ||
+ D.getDirectiveKind() == OMPD_critical ||
+ D.getDirectiveKind() == OMPD_section ||
+ D.getDirectiveKind() == OMPD_master) {
+ EmitStmt(D.getAssociatedStmt());
+ } else {
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
OMPSimdLexicalScope Scope(*this, D);
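This hunk makes `EmitSimpleOMPExecutableDirective` aware that four directive kinds, per the earlier changes in this file, no longer wrap their body in a `CapturedStmt`, so their associated statement is emitted directly. A hedged restatement of the new dispatch as a predicate:

```cpp
// Illustrative helper only; mirrors the if-condition added above.
static bool emitsRawAssociatedStmt(OpenMPDirectiveKind Kind) {
  return Kind == OMPD_atomic || Kind == OMPD_critical ||
         Kind == OMPD_section || Kind == OMPD_master;
}
```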
diff --git a/clang/lib/CodeGen/CGVTT.cpp b/clang/lib/CodeGen/CGVTT.cpp
index e79f3f3dd8bc..564d9f354e64 100644
--- a/clang/lib/CodeGen/CGVTT.cpp
+++ b/clang/lib/CodeGen/CGVTT.cpp
@@ -42,10 +42,8 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
-
- llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int32Ty = CGM.Int32Ty;
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+ llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
SmallVector<llvm::GlobalVariable *, 8> VTables;
SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
@@ -74,16 +72,17 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
}
llvm::Value *Idxs[] = {
- llvm::ConstantInt::get(Int32Ty, 0),
- llvm::ConstantInt::get(Int32Ty, AddressPoint.VTableIndex),
- llvm::ConstantInt::get(Int32Ty, AddressPoint.AddressPointIndex),
+ llvm::ConstantInt::get(CGM.Int32Ty, 0),
+ llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
+ llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
};
llvm::Constant *Init = llvm::ConstantExpr::getGetElementPtr(
VTable->getValueType(), VTable, Idxs, /*InBounds=*/true,
/*InRangeIndex=*/1);
- Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ Init = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Init,
+ CGM.Int8PtrTy);
VTTComponents.push_back(Init);
}
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index 65b3b0c5f53d..bef9a293b7ed 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -641,7 +641,7 @@ void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
llvm::Constant *target;
if (auto *func = dyn_cast<llvm::Function>(globalVal)) {
- target = getOrCreateRelativeStub(func, stubLinkage, isCompleteDtor);
+ target = llvm::DSOLocalEquivalent::get(func);
} else {
llvm::SmallString<16> rttiProxyName(globalVal->getName());
rttiProxyName.append(".rtti_proxy");
@@ -669,74 +669,6 @@ void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
/*position=*/vtableAddressPoint);
}
-llvm::Function *CodeGenVTables::getOrCreateRelativeStub(
- llvm::Function *func, llvm::GlobalValue::LinkageTypes stubLinkage,
- bool isCompleteDtor) const {
- // A complete object destructor can later be substituted in the vtable for an
- // appropriate base object destructor when optimizations are enabled. This can
- // happen for child classes that don't have their own destructor. In the case
- // where a parent virtual destructor is not guaranteed to be in the same
- // linkage unit as the child vtable, it's possible for an external reference
- // for this destructor to be substituted into the child vtable, preventing it
- // from being in rodata. If this function is a complete virtual destructor, we
- // can just force a stub to be emitted for it.
- if (func->isDSOLocal() && !isCompleteDtor)
- return func;
-
- llvm::SmallString<16> stubName(func->getName());
- stubName.append(".stub");
-
- // Instead of taking the offset between the vtable and virtual function
- // directly, we emit a dso_local stub that just contains a tail call to the
- // original virtual function and take the offset between that and the
- // vtable. We do this because there are some cases where the original
- // function that would've been inserted into the vtable is not dso_local
- // which may require some kind of dynamic relocation which prevents the
- // vtable from being readonly. On x86_64, taking the offset between the
- // function and the vtable gets lowered to the offset between the PLT entry
- // for the function and the vtable which gives us a PLT32 reloc. On AArch64,
- // right now only CALL26 and JUMP26 instructions generate PLT relocations,
- // so we manifest them with stubs that are just jumps to the original
- // function.
- auto &module = CGM.getModule();
- llvm::Function *stub = module.getFunction(stubName);
- if (stub) {
- assert(stub->isDSOLocal() &&
- "The previous definition of this stub should've been dso_local.");
- return stub;
- }
-
- stub = llvm::Function::Create(func->getFunctionType(), stubLinkage, stubName,
- module);
-
- // Propogate function attributes.
- stub->setAttributes(func->getAttributes());
-
- stub->setDSOLocal(true);
- stub->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- if (!stub->hasLocalLinkage()) {
- stub->setVisibility(llvm::GlobalValue::HiddenVisibility);
- stub->setComdat(module.getOrInsertComdat(stubName));
- }
-
- // Fill the stub with a tail call that will be optimized.
- llvm::BasicBlock *block =
- llvm::BasicBlock::Create(module.getContext(), "entry", stub);
- llvm::IRBuilder<> block_builder(block);
- llvm::SmallVector<llvm::Value *, 8> args;
- for (auto &arg : stub->args())
- args.push_back(&arg);
- llvm::CallInst *call = block_builder.CreateCall(func, args);
- call->setAttributes(func->getAttributes());
- call->setTailCall();
- if (call->getType()->isVoidTy())
- block_builder.CreateRetVoid();
- else
- block_builder.CreateRet(call);
-
- return stub;
-}
-
bool CodeGenVTables::useRelativeLayout() const {
return CGM.getTarget().getCXXABI().isItaniumFamily() &&
CGM.getItaniumVTableContext().isRelativeLayout();
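The deleted `getOrCreateRelativeStub` machinery is subsumed by `llvm::DSOLocalEquivalent`, a constant that refers to a possibly-preemptible function as if it were dso_local without materializing a jump stub. A hedged sketch of how a relative vtable slot can then be expressed, with `Func`, `VTable`, and `Int64Ty` as stand-ins:

```cpp
// Illustrative: a PC-relative vtable entry as a link-time-foldable constant.
llvm::Constant *Target = llvm::DSOLocalEquivalent::get(Func);
llvm::Constant *Offset = llvm::ConstantExpr::getSub(
    llvm::ConstantExpr::getPtrToInt(Target, Int64Ty),
    llvm::ConstantExpr::getPtrToInt(VTable, Int64Ty));
```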
@@ -1294,8 +1226,16 @@ bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
return !HasLTOVisibilityPublicStd(RD);
}
-llvm::GlobalObject::VCallVisibility
-CodeGenModule::GetVCallVisibilityLevel(const CXXRecordDecl *RD) {
+llvm::GlobalObject::VCallVisibility CodeGenModule::GetVCallVisibilityLevel(
+ const CXXRecordDecl *RD, llvm::DenseSet<const CXXRecordDecl *> &Visited) {
+ // If we have already visited this RD (which means this is a recursive call
+ // since the initial call should have an empty Visited set), return the max
+ // visibility. The recursive calls below compute the min between the result
+ // of the recursive call and the current TypeVis, so returning the max here
+ // ensures that it will have no effect on the current TypeVis.
+ if (!Visited.insert(RD).second)
+ return llvm::GlobalObject::VCallVisibilityTranslationUnit;
+
LinkageInfo LV = RD->getLinkageAndVisibility();
llvm::GlobalObject::VCallVisibility TypeVis;
if (!isExternallyVisible(LV.getLinkage()))
@@ -1307,13 +1247,15 @@ CodeGenModule::GetVCallVisibilityLevel(const CXXRecordDecl *RD) {
for (auto B : RD->bases())
if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
- TypeVis = std::min(TypeVis,
- GetVCallVisibilityLevel(B.getType()->getAsCXXRecordDecl()));
+ TypeVis = std::min(
+ TypeVis,
+ GetVCallVisibilityLevel(B.getType()->getAsCXXRecordDecl(), Visited));
for (auto B : RD->vbases())
if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
- TypeVis = std::min(TypeVis,
- GetVCallVisibilityLevel(B.getType()->getAsCXXRecordDecl()));
+ TypeVis = std::min(
+ TypeVis,
+ GetVCallVisibilityLevel(B.getType()->getAsCXXRecordDecl(), Visited));
return TypeVis;
}
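Returning the maximum visibility for an already-visited record makes the memoized answer the identity element of the `std::min` fold, so revisiting a base (routine with diamond-shaped or virtual inheritance) has no effect on the result and the recursion is guaranteed to terminate. The generic shape of the pattern, with hypothetical `Node` and `localValue`:

```cpp
// Illustrative pattern: a min-fold over a DAG guarded by a visited set.
int minOverBases(const Node *N, llvm::DenseSet<const Node *> &Visited) {
  if (!Visited.insert(N).second)
    return INT_MAX;                 // identity element for std::min
  int V = localValue(N);            // hypothetical per-node value
  for (const Node *B : N->bases())  // hypothetical base iteration
    V = std::min(V, minOverBases(B, Visited));
  return V;
}
```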
@@ -1382,7 +1324,9 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
if (getCodeGenOpts().VirtualFunctionElimination ||
getCodeGenOpts().WholeProgramVTables) {
- llvm::GlobalObject::VCallVisibility TypeVis = GetVCallVisibilityLevel(RD);
+ llvm::DenseSet<const CXXRecordDecl *> Visited;
+ llvm::GlobalObject::VCallVisibility TypeVis =
+ GetVCallVisibilityLevel(RD, Visited);
if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
VTable->setVCallVisibilityMetadata(TypeVis);
}
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index 70e6fed3f4f6..4b39a0520833 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -653,7 +653,7 @@ public:
/// is the type size unless that might overlap another object, in which
/// case it's the dsize.
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const {
- return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).first
+ return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).Width
: Ctx.getTypeSizeInChars(Type);
}
};
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
index 55925110708e..778d4df3c2e9 100644
--- a/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -35,6 +35,7 @@
#include "llvm/IR/LLVMRemarkStreamer.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
+#include "llvm/LTO/LTOBackend.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Pass.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -121,6 +122,8 @@ namespace clang {
/// can happen when Clang plugins trigger additional AST deserialization.
bool IRGenFinished = false;
+ bool TimerIsEnabled = false;
+
std::unique_ptr<CodeGenerator> Gen;
SmallVector<LinkModule, 4> LinkModules;
@@ -135,8 +138,7 @@ namespace clang {
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, bool TimePasses,
- const std::string &InFile,
+ const LangOptions &LangOpts, const std::string &InFile,
SmallVector<LinkModule, 4> LinkModules,
std::unique_ptr<raw_pwrite_stream> OS, LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr)
@@ -148,8 +150,9 @@ namespace clang {
Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)) {
- FrontendTimesIsEnabled = TimePasses;
- llvm::TimePassesIsEnabled = TimePasses;
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
}
// This constructor is used in installing an empty BackendConsumer
@@ -160,7 +163,7 @@ namespace clang {
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, bool TimePasses,
+ const LangOptions &LangOpts,
SmallVector<LinkModule, 4> LinkModules, LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr)
: Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
@@ -171,8 +174,9 @@ namespace clang {
Gen(CreateLLVMCodeGen(Diags, "", HeaderSearchOpts, PPOpts,
CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)) {
- FrontendTimesIsEnabled = TimePasses;
- llvm::TimePassesIsEnabled = TimePasses;
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
}
llvm::Module *getModule() const { return Gen->GetModule(); }
std::unique_ptr<llvm::Module> takeModule() {
@@ -190,12 +194,12 @@ namespace clang {
Context = &Ctx;
- if (FrontendTimesIsEnabled)
+ if (TimerIsEnabled)
LLVMIRGeneration.startTimer();
Gen->Initialize(Ctx);
- if (FrontendTimesIsEnabled)
+ if (TimerIsEnabled)
LLVMIRGeneration.stopTimer();
}
@@ -205,7 +209,7 @@ namespace clang {
"LLVM IR generation of declaration");
// Recurse.
- if (FrontendTimesIsEnabled) {
+ if (TimerIsEnabled) {
LLVMIRGenerationRefCount += 1;
if (LLVMIRGenerationRefCount == 1)
LLVMIRGeneration.startTimer();
@@ -213,7 +217,7 @@ namespace clang {
Gen->HandleTopLevelDecl(D);
- if (FrontendTimesIsEnabled) {
+ if (TimerIsEnabled) {
LLVMIRGenerationRefCount -= 1;
if (LLVMIRGenerationRefCount == 0)
LLVMIRGeneration.stopTimer();
@@ -226,12 +230,12 @@ namespace clang {
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
Context->getSourceManager(),
"LLVM IR generation of inline function");
- if (FrontendTimesIsEnabled)
+ if (TimerIsEnabled)
LLVMIRGeneration.startTimer();
Gen->HandleInlineFunctionDefinition(D);
- if (FrontendTimesIsEnabled)
+ if (TimerIsEnabled)
LLVMIRGeneration.stopTimer();
}
@@ -245,8 +249,13 @@ namespace clang {
bool LinkInModules() {
for (auto &LM : LinkModules) {
if (LM.PropagateAttrs)
- for (Function &F : *LM.Module)
+ for (Function &F : *LM.Module) {
+ // Skip intrinsics: they never take default function attributes, which
+ // keeps this consistent with how intrinsics are created in LLVM IR.
+ if (F.isIntrinsic())
+ continue;
Gen->CGM().addDefaultFunctionDefinitionAttributes(F);
+ }
CurLinkModule = LM.Module.get();
@@ -274,7 +283,7 @@ namespace clang {
{
llvm::TimeTraceScope TimeScope("Frontend");
PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
- if (FrontendTimesIsEnabled) {
+ if (TimerIsEnabled) {
LLVMIRGenerationRefCount += 1;
if (LLVMIRGenerationRefCount == 1)
LLVMIRGeneration.startTimer();
@@ -282,7 +291,7 @@ namespace clang {
Gen->HandleTranslationUnit(C);
- if (FrontendTimesIsEnabled) {
+ if (TimerIsEnabled) {
LLVMIRGenerationRefCount -= 1;
if (LLVMIRGenerationRefCount == 0)
LLVMIRGeneration.stopTimer();
@@ -398,9 +407,6 @@ namespace clang {
bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D);
/// Specialized handler for unsupported backend feature diagnostic.
void UnsupportedDiagHandler(const llvm::DiagnosticInfoUnsupported &D);
- /// Specialized handler for misexpect warnings.
- /// Note that misexpect remarks are emitted through ORE
- void MisExpectDiagHandler(const llvm::DiagnosticInfoMisExpect &D);
/// Specialized handlers for optimization remarks.
/// Note that these handlers only accept remarks and they always handle
/// them.
@@ -668,36 +674,6 @@ void BackendConsumer::UnsupportedDiagHandler(
<< Filename << Line << Column;
}
-void BackendConsumer::MisExpectDiagHandler(
- const llvm::DiagnosticInfoMisExpect &D) {
- StringRef Filename;
- unsigned Line, Column;
- bool BadDebugInfo = false;
- FullSourceLoc Loc;
- std::string Msg;
- raw_string_ostream MsgStream(Msg);
- DiagnosticPrinterRawOStream DP(MsgStream);
-
- // Context will be nullptr for IR input files, we will construct the diag
- // message from llvm::DiagnosticInfoMisExpect.
- if (Context != nullptr) {
- Loc = getBestLocationFromDebugLoc(D, BadDebugInfo, Filename, Line, Column);
- MsgStream << D.getMsg();
- } else {
- DiagnosticPrinterRawOStream DP(MsgStream);
- D.print(DP);
- }
- Diags.Report(Loc, diag::warn_profile_data_misexpect) << MsgStream.str();
-
- if (BadDebugInfo)
- // If we were not able to translate the file:line:col information
- // back to a SourceLocation, at least emit a note stating that
- // we could not translate this location. This can happen in the
- // case of #line directives.
- Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
- << Filename << Line << Column;
-}
-
void BackendConsumer::EmitOptimizationMessage(
const llvm::DiagnosticInfoOptimizationBase &D, unsigned DiagID) {
// We only support warnings and remarks.
@@ -875,9 +851,6 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
case llvm::DK_Unsupported:
UnsupportedDiagHandler(cast<DiagnosticInfoUnsupported>(DI));
return;
- case llvm::DK_MisExpect:
- MisExpectDiagHandler(cast<DiagnosticInfoMisExpect>(DI));
- return;
default:
// Plugin IDs are not bound to any value as they are set dynamically.
ComputeDiagRemarkID(Severity, backend_plugin, DiagID);
@@ -990,17 +963,15 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CoverageSourceInfo *CoverageInfo = nullptr;
// Add the preprocessor callback only when the coverage mapping is generated.
- if (CI.getCodeGenOpts().CoverageMapping) {
- CoverageInfo = new CoverageSourceInfo;
- CI.getPreprocessor().addPPCallbacks(
- std::unique_ptr<PPCallbacks>(CoverageInfo));
- }
+ if (CI.getCodeGenOpts().CoverageMapping)
+ CoverageInfo = CodeGen::CoverageMappingModuleGen::setUpCoverageCallbacks(
+ CI.getPreprocessor());
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, std::string(InFile),
- std::move(LinkModules), std::move(OS), *VMContext, CoverageInfo));
+ CI.getLangOpts(), std::string(InFile), std::move(LinkModules),
+ std::move(OS), *VMContext, CoverageInfo));
BEConsumer = Result.get();
// Enable generating macro debug info only when debug info is not disabled and
@@ -1063,7 +1034,7 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
if (!BMsOrErr)
return DiagErrors(BMsOrErr.takeError());
- BitcodeModule *Bm = FindThinLTOModule(*BMsOrErr);
+ BitcodeModule *Bm = llvm::lto::findThinLTOModule(*BMsOrErr);
// We have nothing to do if the file contains no ThinLTO module. This is
// possible if ThinLTO compilation was not able to split module. Content of
// the file was already processed by indexing and will be passed to the
@@ -1107,81 +1078,74 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
}
void CodeGenAction::ExecuteAction() {
- // If this is an IR file, we have to treat it specially.
- if (getCurrentFileKind().getLanguage() == Language::LLVM_IR) {
- BackendAction BA = static_cast<BackendAction>(Act);
- CompilerInstance &CI = getCompilerInstance();
- auto &CodeGenOpts = CI.getCodeGenOpts();
- auto &Diagnostics = CI.getDiagnostics();
- std::unique_ptr<raw_pwrite_stream> OS =
- GetOutputStream(CI, getCurrentFile(), BA);
- if (BA != Backend_EmitNothing && !OS)
- return;
-
- bool Invalid;
- SourceManager &SM = CI.getSourceManager();
- FileID FID = SM.getMainFileID();
- const llvm::MemoryBuffer *MainFile = SM.getBuffer(FID, &Invalid);
- if (Invalid)
- return;
+ if (getCurrentFileKind().getLanguage() != Language::LLVM_IR) {
+ this->ASTFrontendAction::ExecuteAction();
+ return;
+ }
- TheModule = loadModule(*MainFile);
- if (!TheModule)
- return;
+ // If this is an IR file, we have to treat it specially.
+ BackendAction BA = static_cast<BackendAction>(Act);
+ CompilerInstance &CI = getCompilerInstance();
+ auto &CodeGenOpts = CI.getCodeGenOpts();
+ auto &Diagnostics = CI.getDiagnostics();
+ std::unique_ptr<raw_pwrite_stream> OS =
+ GetOutputStream(CI, getCurrentFile(), BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return;
- const TargetOptions &TargetOpts = CI.getTargetOpts();
- if (TheModule->getTargetTriple() != TargetOpts.Triple) {
- Diagnostics.Report(SourceLocation(),
- diag::warn_fe_override_module)
- << TargetOpts.Triple;
- TheModule->setTargetTriple(TargetOpts.Triple);
- }
+ SourceManager &SM = CI.getSourceManager();
+ FileID FID = SM.getMainFileID();
+ Optional<MemoryBufferRef> MainFile = SM.getBufferOrNone(FID);
+ if (!MainFile)
+ return;
- EmbedBitcode(TheModule.get(), CodeGenOpts,
- MainFile->getMemBufferRef());
-
- LLVMContext &Ctx = TheModule->getContext();
- Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler,
- &Diagnostics);
-
- // Set clang diagnostic handler. To do this we need to create a fake
- // BackendConsumer.
- BackendConsumer Result(BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
- CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
- CI.getTargetOpts(), CI.getLangOpts(),
- CI.getFrontendOpts().ShowTimers,
- std::move(LinkModules), *VMContext, nullptr);
- // PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
- // true here because the valued names are needed for reading textual IR.
- Ctx.setDiscardValueNames(false);
- Ctx.setDiagnosticHandler(
- std::make_unique<ClangDiagnosticHandler>(CodeGenOpts, &Result));
-
- Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
- setupLLVMOptimizationRemarks(
- Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
- CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
- CodeGenOpts.DiagnosticsHotnessThreshold);
-
- if (Error E = OptRecordFileOrErr.takeError()) {
- reportOptRecordError(std::move(E), Diagnostics, CodeGenOpts);
- return;
- }
- std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
- std::move(*OptRecordFileOrErr);
+ TheModule = loadModule(*MainFile);
+ if (!TheModule)
+ return;
- EmitBackendOutput(Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts,
- TargetOpts, CI.getLangOpts(),
- CI.getTarget().getDataLayout(), TheModule.get(), BA,
- std::move(OS));
+ const TargetOptions &TargetOpts = CI.getTargetOpts();
+ if (TheModule->getTargetTriple() != TargetOpts.Triple) {
+ Diagnostics.Report(SourceLocation(), diag::warn_fe_override_module)
+ << TargetOpts.Triple;
+ TheModule->setTargetTriple(TargetOpts.Triple);
+ }
- if (OptRecordFile)
- OptRecordFile->keep();
+ EmbedBitcode(TheModule.get(), CodeGenOpts, *MainFile);
+
+ LLVMContext &Ctx = TheModule->getContext();
+ Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler, &Diagnostics);
+
+ // Set clang diagnostic handler. To do this we need to create a fake
+ // BackendConsumer.
+ BackendConsumer Result(BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
+ CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), CI.getLangOpts(),
+ std::move(LinkModules), *VMContext, nullptr);
+ // PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
+ // true here because the value names are needed for reading textual IR.
+ Ctx.setDiscardValueNames(false);
+ Ctx.setDiagnosticHandler(
+ std::make_unique<ClangDiagnosticHandler>(CodeGenOpts, &Result));
+
+ Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
+ setupLLVMOptimizationRemarks(
+ Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
+ CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
+ CodeGenOpts.DiagnosticsHotnessThreshold);
+
+ if (Error E = OptRecordFileOrErr.takeError()) {
+ reportOptRecordError(std::move(E), Diagnostics, CodeGenOpts);
return;
}
-
- // Otherwise follow the normal AST path.
- this->ASTFrontendAction::ExecuteAction();
+ std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
+ std::move(*OptRecordFileOrErr);
+
+ EmitBackendOutput(Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts,
+ TargetOpts, CI.getLangOpts(),
+ CI.getTarget().getDataLayout(), TheModule.get(), BA,
+ std::move(OS));
+ if (OptRecordFile)
+ OptRecordFile->keep();
}
//
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 4a7c84562dee..b393c88f7751 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -25,6 +25,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
@@ -32,6 +33,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
@@ -40,6 +42,8 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
+#include "llvm/Support/CRC.h"
+#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;
@@ -128,10 +132,24 @@ void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
}
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
+ const Expr *E)
+ : CGF(CGF) {
+ ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
+}
+
+CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
FPOptions FPFeatures)
- : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) {
+ : CGF(CGF) {
+ ConstructorHelper(FPFeatures);
+}
+
+void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
+ OldFPFeatures = CGF.CurFPFeatures;
CGF.CurFPFeatures = FPFeatures;
+ OldExcept = CGF.Builder.getDefaultConstrainedExcept();
+ OldRounding = CGF.Builder.getDefaultConstrainedRounding();
+
if (OldFPFeatures == FPFeatures)
return;
@@ -172,6 +190,8 @@ CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
CGF.CurFPFeatures = OldFPFeatures;
+ CGF.Builder.setDefaultConstrainedExcept(OldExcept);
+ CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
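`CGFPOptionsRAII` now also snapshots the builder's constrained-FP defaults, so a region compiled under, say, a different rounding mode cannot leak that state into the enclosing code. The save/restore core, reduced to a standalone sketch that assumes only the `IRBuilderBase` getter/setter pairs used above:

```cpp
// Minimal RAII sketch of the pattern extended above.
struct FPStateGuard {
  llvm::IRBuilderBase &B;
  llvm::fp::ExceptionBehavior OldExcept;
  llvm::RoundingMode OldRounding;
  explicit FPStateGuard(llvm::IRBuilderBase &B)
      : B(B), OldExcept(B.getDefaultConstrainedExcept()),
        OldRounding(B.getDefaultConstrainedRounding()) {}
  ~FPStateGuard() {
    B.setDefaultConstrainedExcept(OldExcept);
    B.setDefaultConstrainedRounding(OldRounding);
  }
};
```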
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
@@ -772,13 +792,16 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
SanOpts.Mask &= ~SanitizerKind::Null;
// Apply xray attributes to the function (as a string, for now)
+ bool AlwaysXRayAttr = false;
if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
XRayInstrKind::FunctionEntry) ||
CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
XRayInstrKind::FunctionExit)) {
- if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
+ if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
Fn->addFnAttr("function-instrument", "xray-always");
+ AlwaysXRayAttr = true;
+ }
if (XRayAttr->neverXRayInstrument())
Fn->addFnAttr("function-instrument", "xray-never");
if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
@@ -804,8 +827,22 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
XRayInstrKind::FunctionEntry))
Fn->addFnAttr("xray-skip-entry");
+
+ auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
+ if (FuncGroups > 1) {
+ auto FuncName = llvm::makeArrayRef<uint8_t>(
+ CurFn->getName().bytes_begin(), CurFn->getName().bytes_end());
+ auto Group = crc32(FuncName) % FuncGroups;
+ if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
+ !AlwaysXRayAttr)
+ Fn->addFnAttr("function-instrument", "xray-never");
+ }
}
+ if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone)
+ if (CGM.isProfileInstrExcluded(Fn, Loc))
+ Fn->addFnAttr(llvm::Attribute::NoProfile);
+
unsigned Count, Offset;
if (const auto *Attr =
D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
@@ -899,8 +936,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
}
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- Builder.setIsFPConstrained(FD->usesFPIntrin());
- if (FD->usesFPIntrin())
+ Builder.setIsFPConstrained(FD->hasAttr<StrictFPAttr>());
+ if (FD->hasAttr<StrictFPAttr>())
Fn->addFnAttr(llvm::Attribute::StrictFP);
}
@@ -944,8 +981,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
ArgTypes.push_back(VD->getType());
QualType FnType = getContext().getFunctionType(
RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
- DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
- Builder);
+ DI->emitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk);
}
if (ShouldInstrumentFunction()) {
@@ -1104,11 +1140,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
MD->getParent()->getLambdaCaptureDefault() == LCD_None)
SkippedChecks.set(SanitizerKind::Null, true);
- EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
- : TCK_MemberCall,
- Loc, CXXABIThisValue, ThisTy,
- getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
- SkippedChecks);
+ EmitTypeCheck(
+ isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
+ Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
}
}
@@ -1143,10 +1177,18 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
incrementProfileCounter(Body);
+ if (CPlusPlusWithProgress())
+ FnIsMustProgress = true;
+
if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
EmitCompoundStmtWithoutScope(*S);
else
EmitStmt(Body);
+
+ // This is checked after emitting the function body so we know if there
+ // are any permitted infinite loops.
+ if (FnIsMustProgress)
+ CurFn->addFnAttr(llvm::Attribute::MustProgress);
}
/// When instrumenting to collect profile data, the counts for some blocks
@@ -1462,16 +1504,99 @@ bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
return true;
}
+/// Determine whether the given condition is an instrumentable condition
+/// (i.e. no "&&" or "||").
+bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
+ // Bypass simplistic logical-NOT operator before determining whether the
+ // condition contains any other logical operator.
+ if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens()))
+ if (UnOp->getOpcode() == UO_LNot)
+ C = UnOp->getSubExpr();
+
+ const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens());
+ return (!BOp || !BOp->isLogicalOp());
+}
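In other words, branch-coverage instrumentation treats a condition as a single measurable unit unless it contains `&&` or `||`, in which case `EmitBranchToCounterBlock` below instruments each operand separately. Hedged examples of how the predicate classifies conditions:

```cpp
// Assumed classification (Expr operands elided):
//   a < b     -> instrumentable: no logical operator involved
//   !(a < b)  -> instrumentable: the leading '!' is looked through
//   a && b    -> not instrumentable here; handled per operand by
//                EmitBranchToCounterBlock
//   a || !b   -> not instrumentable here, for the same reason
```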
+
+/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
+/// increments a profile counter based on the semantics of the given logical
+/// operator opcode. This is used to instrument branch condition coverage for
+/// logical operators.
+void CodeGenFunction::EmitBranchToCounterBlock(
+ const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
+ Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
+ // If not instrumenting, just emit a branch.
+ bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
+ if (!InstrumentRegions || !isInstrumentedCondition(Cond))
+ return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
+
+ llvm::BasicBlock *ThenBlock = nullptr;
+ llvm::BasicBlock *ElseBlock = nullptr;
+ llvm::BasicBlock *NextBlock = nullptr;
+
+ // Create the block we'll use to increment the appropriate counter.
+ llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
+
+ // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
+ // means we need to evaluate the condition and increment the counter on TRUE:
+ //
+ // if (Cond)
+ // goto CounterIncrBlock;
+ // else
+ // goto FalseBlock;
+ //
+ // CounterIncrBlock:
+ // Counter++;
+ // goto TrueBlock;
+ if (LOp == BO_LAnd) {
+ ThenBlock = CounterIncrBlock;
+ ElseBlock = FalseBlock;
+ NextBlock = TrueBlock;
+ }
+
+ // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
+ // we need to evaluate the condition and increment the counter on FALSE:
+ //
+ // if (Cond)
+ // goto TrueBlock;
+ // else
+ // goto CounterIncrBlock;
+ //
+ // CounterIncrBlock:
+ // Counter++;
+ // goto FalseBlock;
+
+ else if (LOp == BO_LOr) {
+ ThenBlock = TrueBlock;
+ ElseBlock = CounterIncrBlock;
+ NextBlock = FalseBlock;
+ } else {
+ llvm_unreachable("Expected Opcode must be that of a Logical Operator");
+ }
+
+ // Emit Branch based on condition.
+ EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
+
+ // Emit the block containing the counter increment(s).
+ EmitBlock(CounterIncrBlock);
+
+ // Increment corresponding counter; if index not provided, use Cond as index.
+ incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
+
+ // Go to the next block.
+ EmitBranch(NextBlock);
+}
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
-///
+/// \param LH The value of the likelihood attribute on the True branch.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock,
- uint64_t TrueCount) {
+ uint64_t TrueCount,
+ Stmt::Likelihood LH) {
Cond = Cond->IgnoreParens();
if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
@@ -1485,8 +1610,8 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
ConstantBool) {
// br(1 && X) -> br(X).
incrementProfileCounter(CondBOp);
- return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
- TrueCount);
+ return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
+ FalseBlock, TrueCount, LH);
}
// If we have "X && 1", simplify the code to use an uncond branch.
@@ -1494,8 +1619,8 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
ConstantBool) {
// br(X && 1) -> br(X).
- return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
- TrueCount);
+ return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
+ FalseBlock, TrueCount, LH, CondBOp);
}
// Emit the LHS as a conditional. If the LHS conditional is false, we
@@ -1508,7 +1633,11 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
ConditionalEvaluation eval(*this);
{
ApplyDebugLocation DL(*this, Cond);
- EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
+ // Propagate the likelihood attribute like __builtin_expect
+ // __builtin_expect(X && Y, 1) -> X and Y are likely
+ // __builtin_expect(X && Y, 0) -> only Y is unlikely
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
+ LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
EmitBlock(LHSTrue);
}
@@ -1517,7 +1646,8 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// Any temporaries created here are conditional.
eval.begin(*this);
- EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
+ EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
+ FalseBlock, TrueCount, LH);
eval.end(*this);
return;
@@ -1531,8 +1661,8 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
!ConstantBool) {
// br(0 || X) -> br(X).
incrementProfileCounter(CondBOp);
- return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
- TrueCount);
+ return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
+ FalseBlock, TrueCount, LH);
}
// If we have "X || 0", simplify the code to use an uncond branch.
@@ -1540,8 +1670,8 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
!ConstantBool) {
// br(X || 0) -> br(X).
- return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
- TrueCount);
+ return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
+ FalseBlock, TrueCount, LH, CondBOp);
}
// Emit the LHS as a conditional. If the LHS conditional is true, we
@@ -1556,8 +1686,12 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
ConditionalEvaluation eval(*this);
{
+ // Propagate the likelihood attribute like __builtin_expect
+ // __builtin_expect(X || Y, 1) -> only Y is likely
+ // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
ApplyDebugLocation DL(*this, Cond);
- EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
+ LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
EmitBlock(LHSFalse);
}
@@ -1566,7 +1700,8 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// Any temporaries created here are conditional.
eval.begin(*this);
- EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
+ EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
+ RHSCount, LH);
eval.end(*this);
@@ -1579,9 +1714,11 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
if (CondUOp->getOpcode() == UO_LNot) {
// Negate the count.
uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
+ // The values of the enum are chosen to make this negation possible.
+ LH = static_cast<Stmt::Likelihood>(-LH);
// Negate the condition and swap the destination blocks.
return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
- FalseCount);
+ FalseCount, LH);
}
}
@@ -1590,9 +1727,11 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ // The ConditionalOperator itself has no likelihood information for its
+ // true and false branches. This matches the behavior of __builtin_expect.
ConditionalEvaluation cond(*this);
EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
- getProfileCount(CondOp));
+ getProfileCount(CondOp), Stmt::LH_None);
// When computing PGO branch weights, we only know the overall count for
// the true block. This code is essentially doing tail duplication of the
@@ -1612,14 +1751,14 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
{
ApplyDebugLocation DL(*this, Cond);
EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
- LHSScaledTrueCount);
+ LHSScaledTrueCount, LH);
}
cond.end(*this);
cond.begin(*this);
EmitBlock(RHSBlock);
EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
- TrueCount - LHSScaledTrueCount);
+ TrueCount - LHSScaledTrueCount, LH);
cond.end(*this);
return;
@@ -1648,11 +1787,11 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
}
}
- // Create branch weights based on the number of times we get here and the
- // number of times the condition should be true.
- uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
- llvm::MDNode *Weights =
- createProfileWeights(TrueCount, CurrentCount - TrueCount);
+ llvm::MDNode *Weights = createBranchWeights(LH);
+ if (!Weights) {
+ uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
+ Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
+ }
// Emit the code with the fully general case.
llvm::Value *CondV;
@@ -2075,7 +2214,6 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::UnaryTransform:
case Type::Attributed:
case Type::SubstTemplateTypeParm:
- case Type::PackExpansion:
case Type::MacroQualified:
// Keep walking after single level desugaring.
type = type.getSingleStepDesugaredType(getContext());
@@ -2206,13 +2344,16 @@ void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
llvm::Value *AnnotatedVal,
StringRef AnnotationStr,
- SourceLocation Location) {
- llvm::Value *Args[4] = {
- AnnotatedVal,
- Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
- Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
- CGM.EmitAnnotationLineNo(Location)
+ SourceLocation Location,
+ const AnnotateAttr *Attr) {
+ SmallVector<llvm::Value *, 5> Args = {
+ AnnotatedVal,
+ Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
+ Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
+ CGM.EmitAnnotationLineNo(Location),
};
+ if (Attr)
+ Args.push_back(CGM.EmitAnnotationArgs(Attr));
return Builder.CreateCall(AnnotationFn, Args);
}
@@ -2223,7 +2364,7 @@ void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
for (const auto *I : D->specific_attrs<AnnotateAttr>())
EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
- I->getAnnotation(), D->getLocation());
+ I->getAnnotation(), D->getLocation(), I);
}
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
@@ -2240,7 +2381,7 @@ Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
// itself.
if (VTy != CGM.Int8PtrTy)
V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
- V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
+ V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
V = Builder.CreateBitCast(V, VTy);
}
@@ -2276,34 +2417,6 @@ void CGBuilderInserter::InsertHelper(
CGF->InsertHelper(I, Name, BB, InsertPt);
}
-static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
- CodeGenModule &CGM, const FunctionDecl *FD,
- std::string &FirstMissing) {
- // If there aren't any required features listed then go ahead and return.
- if (ReqFeatures.empty())
- return false;
-
- // Now build up the set of caller features and verify that all the required
- // features are there.
- llvm::StringMap<bool> CallerFeatureMap;
- CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
-
- // If we have at least one of the features in the feature list return
- // true, otherwise return false.
- return std::all_of(
- ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
- SmallVector<StringRef, 1> OrFeatures;
- Feature.split(OrFeatures, '|');
- return llvm::any_of(OrFeatures, [&](StringRef Feature) {
- if (!CallerFeatureMap.lookup(Feature)) {
- FirstMissing = Feature.str();
- return false;
- }
- return true;
- });
- });
-}
-
// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
@@ -2330,19 +2443,20 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
// listed cpu and any listed features.
unsigned BuiltinID = TargetDecl->getBuiltinID();
std::string MissingFeature;
+ llvm::StringMap<bool> CallerFeatureMap;
+ CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
if (BuiltinID) {
- SmallVector<StringRef, 1> ReqFeatures;
- const char *FeatureList =
- CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
+ StringRef FeatureList(
+ CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
// Return if the builtin doesn't have any required features.
- if (!FeatureList || StringRef(FeatureList) == "")
+ if (FeatureList.empty())
return;
- StringRef(FeatureList).split(ReqFeatures, ',');
- if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
+ assert(FeatureList.find(' ') == StringRef::npos &&
+ "Space in feature list");
+ TargetFeatures TF(CallerFeatureMap);
+ if (!TF.hasRequiredFeatures(FeatureList))
CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
- << TargetDecl->getDeclName()
- << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
-
+ << TargetDecl->getDeclName() << FeatureList;
} else if (!TargetDecl->isMultiVersion() &&
TargetDecl->hasAttr<TargetAttr>()) {
// Get the required features for the callee.
@@ -2365,7 +2479,13 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
if (F.getValue())
ReqFeatures.push_back(F.getKey());
}
- if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
+ if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
+ if (!CallerFeatureMap.lookup(Feature)) {
+ MissingFeature = Feature.str();
+ return false;
+ }
+ return true;
+ }))
CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
<< FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
}
@@ -2521,3 +2641,36 @@ llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
return llvm::DebugLoc();
}
+
+static Optional<std::pair<uint32_t, uint32_t>>
+getLikelihoodWeights(Stmt::Likelihood LH) {
+ switch (LH) {
+ case Stmt::LH_Unlikely:
+ return std::pair<uint32_t, uint32_t>(llvm::UnlikelyBranchWeight,
+ llvm::LikelyBranchWeight);
+ case Stmt::LH_None:
+ return None;
+ case Stmt::LH_Likely:
+ return std::pair<uint32_t, uint32_t>(llvm::LikelyBranchWeight,
+ llvm::UnlikelyBranchWeight);
+ }
+ llvm_unreachable("Unknown Likelihood");
+}
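These weight pairs are what the C++20 `[[likely]]`/`[[unlikely]]` statement attributes lower to when no profile data is available; the constants come from `LowerExpectIntrinsic.h`, so the result matches `__builtin_expect`. A hedged source-level example:

```cpp
int slow(int x); // stand-in for the cold path
int dispatch(int x) {
  if (x == 0) [[likely]]  // branch gets the (Likely, Unlikely) pair above
    return 1;
  return slow(x);
}
```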
+
+llvm::MDNode *CodeGenFunction::createBranchWeights(Stmt::Likelihood LH) const {
+ Optional<std::pair<uint32_t, uint32_t>> LHW = getLikelihoodWeights(LH);
+ if (!LHW)
+ return nullptr;
+
+ llvm::MDBuilder MDHelper(CGM.getLLVMContext());
+ return MDHelper.createBranchWeights(LHW->first, LHW->second);
+}
+
+llvm::MDNode *CodeGenFunction::createProfileOrBranchWeightsForLoop(
+ const Stmt *Cond, uint64_t LoopCount, const Stmt *Body) const {
+ llvm::MDNode *Weights = createProfileWeightsForLoop(Cond, LoopCount);
+ if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
+ Weights = createBranchWeights(Stmt::getLikelihood(Body));
+
+ return Weights;
+}
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index d794f4f0fa81..8eb7adbc8fcb 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -502,6 +502,26 @@ public:
/// True if the current statement has nomerge attribute.
bool InNoMergeAttributedStmt = false;
+ /// True if the current function should be marked mustprogress.
+ bool FnIsMustProgress = false;
+
+ /// True if the active C++ standard requires forward progress.
+ bool CPlusPlusWithProgress() {
+ return getLangOpts().CPlusPlus11 || getLangOpts().CPlusPlus14 ||
+ getLangOpts().CPlusPlus17 || getLangOpts().CPlusPlus20;
+ }
+
+ /// True if the active C standard requires forward progress.
+ bool CWithProgress() {
+ return getLangOpts().C11 || getLangOpts().C17 || getLangOpts().C2x;
+ }
+
+ /// True if the language standard requires progress in functions or
+ /// in infinite loops with non-constant conditionals.
+ bool LanguageRequiresProgress() {
+ return CWithProgress() || CPlusPlusWithProgress();
+ }
+
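These helpers feed the new `mustprogress` logic in `EmitFunctionBody`: when the active standard guarantees forward progress, a side-effect-free infinite loop is undefined behavior and the whole function may carry the attribute. A hedged example of the observable difference:

```cpp
// Compiled as C++11 or later, this function is emitted with the
// mustprogress attribute, so the optimizer may assume the loop
// terminates (and may delete it). Compiled as C, a loop whose
// controlling expression is a constant remains a legal infinite loop.
void spin(void) {
  while (1) {
    // no side effects, atomics, or volatile accesses
  }
}
```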
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
llvm::Value *BlockPointer = nullptr;
@@ -608,11 +628,15 @@ public:
class CGFPOptionsRAII {
public:
CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
+ CGFPOptionsRAII(CodeGenFunction &CGF, const Expr *E);
~CGFPOptionsRAII();
private:
+ void ConstructorHelper(FPOptions FPFeatures);
CodeGenFunction &CGF;
FPOptions OldFPFeatures;
+ llvm::fp::ExceptionBehavior OldExcept;
+ llvm::RoundingMode OldRounding;
Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
};
FPOptions CurFPFeatures;
@@ -672,12 +696,13 @@ public:
initFullExprCleanup();
}
- /// Queue a cleanup to be pushed after finishing the current
- /// full-expression.
+ /// Queue a cleanup to be pushed after finishing the current full-expression,
+ /// potentially with an active flag.
template <class T, class... As>
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
if (!isInConditionalBranch())
- return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
+ return pushCleanupAfterFullExprWithActiveFlag<T>(Kind, Address::invalid(),
+ A...);
Address ActiveFlag = createCleanupActiveFlag();
assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
@@ -687,12 +712,12 @@ public:
SavedTuple Saved{saveValueInCond(A)...};
typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
- pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
+ pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag, Saved);
}
template <class T, class... As>
- void pushCleanupAfterFullExprImpl(CleanupKind Kind, Address ActiveFlag,
- As... A) {
+ void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
+ Address ActiveFlag, As... A) {
LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
ActiveFlag.isValid()};
@@ -1394,19 +1419,31 @@ private:
};
OpenMPCancelExitStack OMPCancelStack;
+ /// Calculate branch weights for the likelihood attribute
+ llvm::MDNode *createBranchWeights(Stmt::Likelihood LH) const;
+
CodeGenPGO PGO;
/// Calculate branch weights appropriate for PGO data
- llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
- llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
+ llvm::MDNode *createProfileWeights(uint64_t TrueCount,
+ uint64_t FalseCount) const;
+ llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
- uint64_t LoopCount);
+ uint64_t LoopCount) const;
+
+ /// Calculate the branch weight for PGO data or the likelihood attribute.
+  /// The function first tries \ref createProfileWeightsForLoop; if no PGO
+  /// weights are available, it falls back to \ref createBranchWeights.
+ llvm::MDNode *createProfileOrBranchWeightsForLoop(const Stmt *Cond,
+ uint64_t LoopCount,
+ const Stmt *Body) const;
public:
/// Increment the profiler's counter for the given statement by \p StepV.
/// If \p StepV is null, the default increment is 1.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
- if (CGM.getCodeGenOpts().hasProfileClangInstr())
+ if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ !CurFn->hasFnAttribute(llvm::Attribute::NoProfile))
PGO.emitCounterIncrement(Builder, S, StepV);
PGO.setCurrentStmt(S);
}
@@ -1438,6 +1475,9 @@ private:
/// The branch weights of SwitchInsn when doing instrumentation based PGO.
SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
+ /// The likelihood attributes of the SwitchCase.
+ SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
+
/// CaseRangeBlock - This block holds if condition check for last case
/// statement range in current switch instruction.
llvm::BasicBlock *CaseRangeBlock = nullptr;
@@ -1817,7 +1857,7 @@ private:
llvm::BasicBlock *TerminateLandingPad = nullptr;
llvm::BasicBlock *TerminateHandler = nullptr;
- llvm::BasicBlock *TrapBB = nullptr;
+ llvm::SmallVector<llvm::BasicBlock *, 2> TrapBBs;
/// Terminate funclets keyed by parent funclet pad.
llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
@@ -3074,7 +3114,7 @@ public:
/// statements.
///
/// \return True if the statement was handled.
- bool EmitSimpleStmt(const Stmt *S);
+ bool EmitSimpleStmt(const Stmt *S, ArrayRef<const Attr *> Attrs);
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
AggValueSlot AVS = AggValueSlot::ignored());
@@ -3103,9 +3143,9 @@ public:
void EmitBreakStmt(const BreakStmt &S);
void EmitContinueStmt(const ContinueStmt &S);
void EmitSwitchStmt(const SwitchStmt &S);
- void EmitDefaultStmt(const DefaultStmt &S);
- void EmitCaseStmt(const CaseStmt &S);
- void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S, ArrayRef<const Attr *> Attrs);
+ void EmitCaseStmt(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
+ void EmitCaseStmtRange(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
void EmitAsmStmt(const AsmStmt &S);
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
@@ -3334,12 +3374,15 @@ public:
Address BasePointersArray = Address::invalid();
Address PointersArray = Address::invalid();
Address SizesArray = Address::invalid();
+ Address MappersArray = Address::invalid();
unsigned NumberOfTargetItems = 0;
explicit OMPTargetDataInfo() = default;
OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
- Address SizesArray, unsigned NumberOfTargetItems)
+ Address SizesArray, Address MappersArray,
+ unsigned NumberOfTargetItems)
: BasePointersArray(BasePointersArray), PointersArray(PointersArray),
- SizesArray(SizesArray), NumberOfTargetItems(NumberOfTargetItems) {}
+ SizesArray(SizesArray), MappersArray(MappersArray),
+ NumberOfTargetItems(NumberOfTargetItems) {}
};
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
const RegionCodeGenTy &BodyGen,
@@ -3562,6 +3605,9 @@ public:
// LValue Expression Emission
//===--------------------------------------------------------------------===//
+ /// Create a check that a scalar RValue is non-null.
+ llvm::Value *EmitNonNullRValueCheck(RValue RV, QualType T);
+
/// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
RValue GetUndefRValue(QualType Ty);
@@ -4075,13 +4121,10 @@ public:
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID);
-private:
enum class MSVCIntrin;
-
-public:
llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
- llvm::Value *EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args);
+ llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
@@ -4259,7 +4302,7 @@ public:
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
/// Call unatexit() with function dtorStub.
- llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub);
+ llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
@@ -4312,7 +4355,8 @@ public:
llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
llvm::Value *AnnotatedVal,
StringRef AnnotationStr,
- SourceLocation Location);
+ SourceLocation Location,
+ const AnnotateAttr *Attr);
/// Emit local annotations for the local variable V, declared by D.
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
@@ -4351,13 +4395,29 @@ public:
bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
bool AllowLabels = false);
+ /// isInstrumentedCondition - Determine whether the given condition is an
+ /// instrumentable condition (i.e. no "&&" or "||").
+ static bool isInstrumentedCondition(const Expr *C);
+
+ /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
+ /// increments a profile counter based on the semantics of the given logical
+ /// operator opcode. This is used to instrument branch condition coverage
+ /// for logical operators.
+ void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock,
+ uint64_t TrueCount = 0,
+ Stmt::Likelihood LH = Stmt::LH_None,
+ const Expr *CntrIdx = nullptr);
+
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
/// try to simplify the codegen of the conditional based on the branch.
/// TrueCount should be the number of times we expect the condition to
/// evaluate to true based on PGO data.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
- llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
+ llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
+ Stmt::Likelihood LH = Stmt::LH_None);
/// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
/// nonnull, if \p LHS is marked _Nonnull.
@@ -4421,7 +4481,7 @@ public:
/// Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
- void EmitTrapCheck(llvm::Value *Checked);
+ void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID);
/// Emit a call to trap or debugtrap and attach function attribute
/// "trap-func-name" if specified.
@@ -4516,26 +4576,6 @@ private:
Address Loc);
public:
-#ifndef NDEBUG
- // Determine whether the given argument is an Objective-C method
- // that may have type parameters in its signature.
- static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
- const DeclContext *dc = method->getDeclContext();
- if (const ObjCInterfaceDecl *classDecl= dyn_cast<ObjCInterfaceDecl>(dc)) {
- return classDecl->getTypeParamListAsWritten();
- }
-
- if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
- return catDecl->getTypeParamList();
- }
-
- return false;
- }
-
- template<typename T>
- static bool isObjCMethodWithTypeParams(const T *) { return false; }
-#endif
-
enum class EvaluationOrder {
///! No language constraints on evaluation order.
Default,
@@ -4545,56 +4585,16 @@ public:
ForceRightToLeft
};
- /// EmitCallArgs - Emit call arguments for a function.
- template <typename T>
- void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
- llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
- AbstractCallee AC = AbstractCallee(),
- unsigned ParamsToSkip = 0,
- EvaluationOrder Order = EvaluationOrder::Default) {
- SmallVector<QualType, 16> ArgTypes;
- CallExpr::const_arg_iterator Arg = ArgRange.begin();
-
- assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
- "Can't skip parameters if type info is not provided");
- if (CallArgTypeInfo) {
-#ifndef NDEBUG
- bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
-#endif
-
- // First, use the argument types that the type info knows about
- for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
- E = CallArgTypeInfo->param_type_end();
- I != E; ++I, ++Arg) {
- assert(Arg != ArgRange.end() && "Running over edge of argument list!");
- assert((isGenericMethod ||
- ((*I)->isVariablyModifiedType() ||
- (*I).getNonReferenceType()->isObjCRetainableType() ||
- getContext()
- .getCanonicalType((*I).getNonReferenceType())
- .getTypePtr() ==
- getContext()
- .getCanonicalType((*Arg)->getType())
- .getTypePtr())) &&
- "type mismatch in call argument!");
- ArgTypes.push_back(*I);
- }
- }
-
- // Either we've emitted all the call args, or we have a call to variadic
- // function.
- assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
- CallArgTypeInfo->isVariadic()) &&
- "Extra arguments in non-variadic function!");
-
- // If we still have any arguments, emit them using the type of the argument.
- for (auto *A : llvm::make_range(Arg, ArgRange.end()))
- ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
+ // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
+ // an ObjCMethodDecl.
+ struct PrototypeWrapper {
+ llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;
- EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order);
- }
+ PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
+ PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
+ };
- void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
+ void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
AbstractCallee AC = AbstractCallee(),
unsigned ParamsToSkip = 0,
@@ -4672,6 +4672,77 @@ private:
llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
};
+/// TargetFeatures - This class is used to check whether the builtin function
+/// has the required target-specific features. It supports combinations built
+/// from ',' (and), '|' (or), and '()'. By default, ',' binds more tightly
+/// than '|'.
+/// E.g.:
+/// A,B|C means the builtin function requires both A and B, or C.
+/// To require both A and B, or both A and C, write either A,B|A,C or
+/// A,(B|C).
+/// The feature list must not contain spaces, and parentheses must be
+/// balanced. (A usage sketch follows the class definition.)
+class TargetFeatures {
+ struct FeatureListStatus {
+ bool HasFeatures;
+ StringRef CurFeaturesList;
+ };
+
+ const llvm::StringMap<bool> &CallerFeatureMap;
+
+ FeatureListStatus getAndFeatures(StringRef FeatureList) {
+ int InParentheses = 0;
+ bool HasFeatures = true;
+ size_t SubexpressionStart = 0;
+ for (size_t i = 0, e = FeatureList.size(); i < e; ++i) {
+ char CurrentToken = FeatureList[i];
+ switch (CurrentToken) {
+ default:
+ break;
+ case '(':
+ if (InParentheses == 0)
+ SubexpressionStart = i + 1;
+ ++InParentheses;
+ break;
+ case ')':
+ --InParentheses;
+        assert(InParentheses >= 0 && "Parentheses are not balanced");
+ LLVM_FALLTHROUGH;
+ case '|':
+ case ',':
+ if (InParentheses == 0) {
+ if (HasFeatures && i != SubexpressionStart) {
+ StringRef F = FeatureList.slice(SubexpressionStart, i);
+ HasFeatures = CurrentToken == ')' ? hasRequiredFeatures(F)
+ : CallerFeatureMap.lookup(F);
+ }
+ SubexpressionStart = i + 1;
+ if (CurrentToken == '|') {
+ return {HasFeatures, FeatureList.substr(SubexpressionStart)};
+ }
+ }
+ break;
+ }
+ }
+  assert(InParentheses == 0 && "Parentheses are not balanced");
+ if (HasFeatures && SubexpressionStart != FeatureList.size())
+ HasFeatures =
+ CallerFeatureMap.lookup(FeatureList.substr(SubexpressionStart));
+ return {HasFeatures, StringRef()};
+ }
+
+public:
+ bool hasRequiredFeatures(StringRef FeatureList) {
+ FeatureListStatus FS = {false, FeatureList};
+ while (!FS.HasFeatures && !FS.CurFeaturesList.empty())
+ FS = getAndFeatures(FS.CurFeaturesList);
+ return FS.HasFeatures;
+ }
+
+ TargetFeatures(const llvm::StringMap<bool> &CallerFeatureMap)
+ : CallerFeatureMap(CallerFeatureMap) {}
+};
+
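
A short usage sketch of the grammar described above (the feature names and map
contents are illustrative, not from the patch):

    llvm::StringMap<bool> Caller;
    Caller["sse4.2"] = true;
    Caller["avx"] = true;
    TargetFeatures TF(Caller);
    TF.hasRequiredFeatures("sse4.2,avx");        // true: both present
    TF.hasRequiredFeatures("avx512f|avx");       // true: 'avx' satisfies '|'
    TF.hasRequiredFeatures("avx512f,(bmi|avx)"); // false: 'avx512f' missing
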
inline DominatingLLVMValue::saved_type
DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
if (!needsSaving(value)) return saved_type(value, false);
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 4ae8ce7e5ccf..31afbc6b4262 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -19,6 +19,7 @@
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
+#include "CGOpenMPRuntimeAMDGCN.h"
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "CodeGenPGO.h"
@@ -75,11 +76,11 @@ static const char AnnotationSection[] = "llvm.metadata";
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
switch (CGM.getTarget().getCXXABI().getKind()) {
+ case TargetCXXABI::AppleARM64:
case TargetCXXABI::Fuchsia:
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
- case TargetCXXABI::iOS64:
case TargetCXXABI::WatchOS:
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
@@ -122,6 +123,8 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
IntAlignInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
+ CharTy =
+ llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext,
C.getTargetInfo().getMaxPointerWidth());
@@ -215,6 +218,11 @@ void CodeGenModule::createOpenMPRuntime() {
"OpenMP NVPTX is only prepared to deal with device code.");
OpenMPRuntime.reset(new CGOpenMPRuntimeNVPTX(*this));
break;
+ case llvm::Triple::amdgcn:
+ assert(getLangOpts().OpenMPIsDevice &&
+ "OpenMP AMDGCN is only prepared to deal with device code.");
+ OpenMPRuntime.reset(new CGOpenMPRuntimeAMDGCN(*this));
+ break;
default:
if (LangOpts.OpenMPSimd)
OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
@@ -366,7 +374,7 @@ void CodeGenModule::checkAliases() {
for (const GlobalDecl &GD : Aliases) {
StringRef MangledName = getMangledName(GD);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
- auto *Alias = dyn_cast<llvm::GlobalIndirectSymbol>(Entry);
+ auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
Alias->eraseFromParent();
}
@@ -395,6 +403,48 @@ void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
}
}
+static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
+ llvm::Module &M) {
+ if (!LO.VisibilityFromDLLStorageClass)
+ return;
+
+ llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
+ CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
+ llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
+ CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
+ llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
+ CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
+ llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
+ CodeGenModule::GetLLVMVisibility(
+ LO.getExternDeclNoDLLStorageClassVisibility());
+
+ for (llvm::GlobalValue &GV : M.global_values()) {
+ if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
+ continue;
+
+ // Reset DSO locality before setting the visibility. This removes
+ // any effects that visibility options and annotations may have
+ // had on the DSO locality. Setting the visibility will implicitly set
+ // appropriate globals to DSO Local; however, this will be pessimistic
+    // w.r.t. the normal compiler IRGen.
+ GV.setDSOLocal(false);
+
+ if (GV.isDeclarationForLinker()) {
+ GV.setVisibility(GV.getDLLStorageClass() ==
+ llvm::GlobalValue::DLLImportStorageClass
+ ? ExternDeclDLLImportVisibility
+ : ExternDeclNoDLLStorageClassVisibility);
+ } else {
+ GV.setVisibility(GV.getDLLStorageClass() ==
+ llvm::GlobalValue::DLLExportStorageClass
+ ? DLLExportVisibility
+ : NoDLLStorageClassVisibility);
+ }
+
+ GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+ }
+}
+
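
To make the mapping concrete, a hedged sketch of the intended effect (the
visibility on the right is whatever the corresponding option was given; the
option spellings themselves are not shown in this hunk):

    // With visibility-from-dllstorageclass in effect:
    __declspec(dllexport) int exported_def() { return 1; } // DLLExportVisibility
    int plain_def() { return 2; }                // NoDLLStorageClassVisibility
    __declspec(dllimport) int imported_decl();  // ExternDeclDLLImportVisibility
    extern int plain_decl;           // ExternDeclNoDLLStorageClassVisibility
    // In every case the DLL storage class itself is then cleared.
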
void CodeGenModule::Release() {
EmitDeferred();
EmitVTablesOpportunistically();
@@ -486,9 +536,6 @@ void CodeGenModule::Release() {
if (Context.getLangOpts().SemanticInterposition)
// Require various optimization to respect semantic interposition.
getModule().setSemanticInterposition(1);
- else if (Context.getLangOpts().ExplicitNoSemanticInterposition)
- // Allow dso_local on applicable targets.
- getModule().setSemanticInterposition(0);
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
@@ -584,6 +631,30 @@ void CodeGenModule::Release() {
1);
}
+ if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
+ Arch == llvm::Triple::aarch64_be) {
+ getModule().addModuleFlag(llvm::Module::Error,
+ "branch-target-enforcement",
+ LangOpts.BranchTargetEnforcement);
+
+ getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
+ LangOpts.hasSignReturnAddress());
+
+ getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
+ LangOpts.isSignReturnAddressScopeAll());
+
+ getModule().addModuleFlag(llvm::Module::Error,
+ "sign-return-address-with-bkey",
+ !LangOpts.isSignReturnAddressWithAKey());
+ }
+
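
For reference, a hedged sketch of how these module flags are typically driven
(the -mbranch-protection spelling is assumed from the AArch64 docs, not part of
this hunk):

    // clang --target=aarch64-linux-gnu -mbranch-protection=bti+pac-ret+b-key
    // is expected to produce module flags along the lines of:
    //   !{i32 1, !"branch-target-enforcement", i32 1}
    //   !{i32 1, !"sign-return-address", i32 1}
    //   !{i32 1, !"sign-return-address-all", i32 0}   // non-leaf scope
    //   !{i32 1, !"sign-return-address-with-bkey", i32 1}
    int f(void) { return 0; }
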
+ if (!CodeGenOpts.MemoryProfileOutput.empty()) {
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ getModule().addModuleFlag(
+ llvm::Module::Error, "MemProfProfileFilename",
+ llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
+ }
+
if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
// Indicate whether __nvvm_reflect should be configured to flush denormal
// floating point values to 0. (This corresponds to its "__CUDA_FTZ"
@@ -658,6 +729,12 @@ void CodeGenModule::Release() {
getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
EmitBackendOptionsMetadata(getCodeGenOpts());
+
+  // Set visibility from DLL storage class. We do this at the end of LLVM
+  // IR generation, after any operation that might affect the DLL storage
+  // class or the visibility, and before anything that might act on these.
+ setVisibilityFromDLLStorageClass(LangOpts, getModule());
}
void CodeGenModule::EmitOpenCLMetadata() {
@@ -868,17 +945,32 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
return true;
- // Only handle COFF and ELF for now.
- if (!TT.isOSBinFormatELF())
- return false;
-
- // If this is not an executable, don't assume anything is local.
const auto &CGOpts = CGM.getCodeGenOpts();
llvm::Reloc::Model RM = CGOpts.RelocationModel;
const auto &LOpts = CGM.getLangOpts();
- if (RM != llvm::Reloc::Static && !LOpts.PIE)
+
+ if (TT.isOSBinFormatMachO()) {
+ if (RM == llvm::Reloc::Static)
+ return true;
+ return GV->isStrongDefinitionForLinker();
+ }
+
+ // Only handle COFF and ELF for now.
+ if (!TT.isOSBinFormatELF())
return false;
+ if (RM != llvm::Reloc::Static && !LOpts.PIE) {
+ // On ELF, if -fno-semantic-interposition is specified and the target
+ // supports local aliases, there will be neither CC1
+ // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
+ // dso_local if using a local alias is preferable (can avoid GOT
+ // indirection).
+ if (!GV->canBenefitFromLocalAlias())
+ return false;
+ return !(CGM.getLangOpts().SemanticInterposition ||
+ CGM.getLangOpts().HalfNoSemanticInterposition);
+ }
+
// A definition cannot be preempted from an executable.
if (!GV->isDeclarationForLinker())
return true;
@@ -889,23 +981,31 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
return false;
- // PPC has no copy relocations and cannot use a plt entry as a symbol address.
- llvm::Triple::ArchType Arch = TT.getArch();
- if (Arch == llvm::Triple::ppc || Arch == llvm::Triple::ppc64 ||
- Arch == llvm::Triple::ppc64le)
+ // PowerPC64 prefers TOC indirection to avoid copy relocations.
+ if (TT.isPPC64())
return false;
- // If we can use copy relocations we can assume it is local.
- if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
- if (!Var->isThreadLocal() &&
- (RM == llvm::Reloc::Static || CGOpts.PIECopyRelocations))
+ if (CGOpts.DirectAccessExternalData) {
+ // If -fdirect-access-external-data (default for -fno-pic), set dso_local
+ // for non-thread-local variables. If the symbol is not defined in the
+ // executable, a copy relocation will be needed at link time. dso_local is
+ // excluded for thread-local variables because they generally don't support
+ // copy relocations.
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
+ if (!Var->isThreadLocal())
+ return true;
+
+ // -fno-pic sets dso_local on a function declaration to allow direct
+ // accesses when taking its address (similar to a data symbol). If the
+ // function is not defined in the executable, a canonical PLT entry will be
+ // needed at link time. -fno-direct-access-external-data can avoid the
+ // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
+ // it could just cause trouble without providing perceptible benefits.
+ if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
return true;
+ }
- // If we can use a plt entry as the symbol address we can assume it
- // is local.
- // FIXME: This should work for PIE, but the gold linker doesn't support it.
- if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
- return true;
+ // If we can use copy relocations we can assume it is local.
// Otherwise don't assume it is local.
return false;
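
A hedged example of the -fdirect-access-external-data behaviour described
above (the IR in comments is the expected shape, not verified output):

    // With -fno-pic, which implies -fdirect-access-external-data, a
    // non-thread-local external variable is expected to come out dso_local,
    // enabling a direct access and, if undefined locally, a copy relocation:
    //   @global_x = external dso_local global i32
    extern int global_x;
    int read_it(void) { return global_x; }
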
@@ -1207,8 +1307,10 @@ void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
/// AddGlobalDtor - Add a function to the list that will be called
/// when the module is unloaded.
-void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
- if (CodeGenOpts.RegisterGlobalDtorsWithAtExit) {
+void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
+ bool IsDtorAttrFunc) {
+ if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
+ (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
DtorsUsingAtExit[Priority].push_back(Dtor);
return;
}
@@ -1321,10 +1423,18 @@ static void removeImageAccessQualifier(std::string& TyName) {
// (basically all single AS CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
switch (AS) {
- case LangAS::opencl_global: return 1;
- case LangAS::opencl_constant: return 2;
- case LangAS::opencl_local: return 3;
- case LangAS::opencl_generic: return 4; // Not in SPIR 2.0 specs.
+ case LangAS::opencl_global:
+ return 1;
+ case LangAS::opencl_constant:
+ return 2;
+ case LangAS::opencl_local:
+ return 3;
+ case LangAS::opencl_generic:
+ return 4; // Not in SPIR 2.0 specs.
+ case LangAS::opencl_global_device:
+ return 5;
+ case LangAS::opencl_global_host:
+ return 6;
default:
return 0; // Assume private.
}
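
For illustration, an OpenCL C sketch of the numbering above (the
kernel_arg_addr_space metadata name follows the SPIR convention):

    // Expected arg-info metadata: !kernel_arg_addr_space !{i32 1, i32 2, i32 3};
    // the new global_device/global_host spaces would number 5 and 6.
    kernel void k(global int *g, constant int *c, local int *l) { }
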
@@ -1658,7 +1768,8 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
B.addAttribute(llvm::Attribute::OptimizeForSize);
B.addAttribute(llvm::Attribute::Cold);
}
-
+ if (D->hasAttr<HotAttr>())
+ B.addAttribute(llvm::Attribute::Hot);
if (D->hasAttr<MinSizeAttr>())
B.addAttribute(llvm::Attribute::MinSize);
}
@@ -1708,6 +1819,15 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
}
}
+void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
+ llvm::Function *F) {
+ if (D->hasAttr<StrictFPAttr>()) {
+ llvm::AttrBuilder FuncAttrs;
+ FuncAttrs.addAttribute("strictfp");
+ F->addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
+ }
+}
+
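
A hedged sketch of what sets the implicit StrictFPAttr (the -ffp-model=strict
spelling and the pragma's support status are assumptions, not shown in this
hunk):

    // Compiling with -ffp-model=strict, or with the pragma below in effect,
    // is expected to put the "strictfp" attribute on the emitted function so
    // the floating-point environment is honoured:
    #pragma STDC FENV_ACCESS ON
    double scaled(double x) { return x * 0.1; }
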
void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
const Decl *D = GD.getDecl();
if (dyn_cast_or_null<NamedDecl>(D))
@@ -1732,6 +1852,7 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
// we have a decl for the function and it has a target attribute then
// parse that and add it to the feature set.
StringRef TargetCPU = getTarget().getTargetOpts().CPU;
+ StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
std::vector<std::string> Features;
const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
FD = FD ? FD->getMostRecentDecl() : FD;
@@ -1752,9 +1873,14 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
// the function.
if (TD) {
ParsedTargetAttr ParsedAttr = TD->parse();
- if (ParsedAttr.Architecture != "" &&
- getTarget().isValidCPUName(ParsedAttr.Architecture))
+ if (!ParsedAttr.Architecture.empty() &&
+ getTarget().isValidCPUName(ParsedAttr.Architecture)) {
TargetCPU = ParsedAttr.Architecture;
+ TuneCPU = ""; // Clear the tune CPU.
+ }
+ if (!ParsedAttr.Tune.empty() &&
+ getTarget().isValidCPUName(ParsedAttr.Tune))
+ TuneCPU = ParsedAttr.Tune;
}
} else {
// Otherwise just add the existing target cpu and target features to the
@@ -1762,10 +1888,14 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
Features = getTarget().getTargetOpts().Features;
}
- if (TargetCPU != "") {
+ if (!TargetCPU.empty()) {
Attrs.addAttribute("target-cpu", TargetCPU);
AddedAttr = true;
}
+ if (!TuneCPU.empty()) {
+ Attrs.addAttribute("tune-cpu", TuneCPU);
+ AddedAttr = true;
+ }
if (!Features.empty()) {
llvm::sort(Features);
Attrs.addAttribute("target-features", llvm::join(Features, ","));
@@ -1802,8 +1932,11 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
// We know that GetCPUAndFeaturesAttributes will always have the
// newest set, since it has the newest possible FunctionDecl, so the
// new ones should replace the old.
- F->removeFnAttr("target-cpu");
- F->removeFnAttr("target-features");
+ llvm::AttrBuilder RemoveAttrs;
+ RemoveAttrs.addAttribute("target-cpu");
+ RemoveAttrs.addAttribute("target-features");
+ RemoveAttrs.addAttribute("tune-cpu");
+ F->removeAttributes(llvm::AttributeList::FunctionIndex, RemoveAttrs);
F->addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
}
}
@@ -1959,7 +2092,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
}
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
- assert(!GV->isDeclaration() &&
+ assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
"Only globals with definition can force usage.");
LLVMUsed.emplace_back(GV);
}
@@ -2165,6 +2298,11 @@ void CodeGenModule::EmitDeferred() {
assert(DeferredVTables.empty());
}
+ // Emit CUDA/HIP static device variables referenced by host code only.
+ if (getLangOpts().CUDA)
+ for (auto V : getContext().CUDAStaticDeviceVarReferencedByHost)
+ DeferredDeclsToEmit.push_back(V);
+
// Stop if we're out of both deferred vtables and deferred declarations.
if (DeferredDeclsToEmit.empty())
return;
@@ -2280,13 +2418,47 @@ llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
return llvm::ConstantInt::get(Int32Ty, LineNo);
}
+llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
+ ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
+ if (Exprs.empty())
+ return llvm::ConstantPointerNull::get(Int8PtrTy);
+
+ llvm::FoldingSetNodeID ID;
+ for (Expr *E : Exprs) {
+ ID.Add(cast<clang::ConstantExpr>(E)->getAPValueResult());
+ }
+ llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
+ if (Lookup)
+ return Lookup;
+
+ llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
+ LLVMArgs.reserve(Exprs.size());
+  ConstantEmitter ConstEmitter(*this);
+  llvm::transform(Exprs, std::back_inserter(LLVMArgs), [&](const Expr *E) {
+    const auto *CE = cast<clang::ConstantExpr>(E);
+    return ConstEmitter.emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(),
+ CE->getType());
+ });
+ auto *Struct = llvm::ConstantStruct::getAnon(LLVMArgs);
+ auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
+ llvm::GlobalValue::PrivateLinkage, Struct,
+ ".args");
+ GV->setSection(AnnotationSection);
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ auto *Bitcasted = llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+
+ Lookup = Bitcasted;
+ return Bitcasted;
+}
+
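
A hedged sketch of the source-level feature this enables (arguments beyond the
annotation string must be constant-evaluable):

    // The extra arguments are constant-evaluated, packed into a private
    // ".args" global in the llvm.metadata section, and referenced from the
    // new fifth i8* field of the global annotation struct:
    __attribute__((annotate("range", 0, 100)))
    int bounded_counter;
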
llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
const AnnotateAttr *AA,
SourceLocation L) {
// Get the globals for file name, annotation, and the line number.
llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
*UnitGV = EmitAnnotationUnit(L),
- *LineNoCst = EmitAnnotationLineNo(L);
+ *LineNoCst = EmitAnnotationLineNo(L),
+ *Args = EmitAnnotationArgs(AA);
llvm::Constant *ASZeroGV = GV;
if (GV->getAddressSpace() != 0) {
@@ -2295,11 +2467,12 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
}
// Create the ConstantStruct for the global annotation.
- llvm::Constant *Fields[4] = {
- llvm::ConstantExpr::getBitCast(ASZeroGV, Int8PtrTy),
- llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
- llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
- LineNoCst
+ llvm::Constant *Fields[] = {
+ llvm::ConstantExpr::getBitCast(ASZeroGV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
+ LineNoCst,
+ Args,
};
return llvm::ConstantStruct::getAnon(Fields);
}
@@ -2390,6 +2563,34 @@ bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
return true;
}
+bool CodeGenModule::isProfileInstrExcluded(llvm::Function *Fn,
+ SourceLocation Loc) const {
+ const auto &ProfileList = getContext().getProfileList();
+ // If the profile list is empty, then instrument everything.
+ if (ProfileList.isEmpty())
+ return false;
+ CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
+ // First, check the function name.
+ Optional<bool> V = ProfileList.isFunctionExcluded(Fn->getName(), Kind);
+ if (V.hasValue())
+ return *V;
+ // Next, check the source location.
+ if (Loc.isValid()) {
+ Optional<bool> V = ProfileList.isLocationExcluded(Loc, Kind);
+ if (V.hasValue())
+ return *V;
+ }
+ // If location is unknown, this may be a compiler-generated function. Assume
+ // it's located in the main file.
+ auto &SM = Context.getSourceManager();
+ if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ Optional<bool> V = ProfileList.isFileExcluded(MainFile->getName(), Kind);
+ if (V.hasValue())
+ return *V;
+ }
+ return ProfileList.getDefault();
+}
+
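
As a loosely-held sketch of the input this consults (the exact -fprofile-list
syntax here is an assumption based on LLVM's SpecialCaseList format): name
matches are consulted first, then the source location, then the main file,
then the list's default.

    # profile-list.txt, passed via -fprofile-list=profile-list.txt
    [clang]
    fun:expensive_helper   # matched by function name (checked first)
    src:third_party/*      # matched by source location / file
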
bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
// Never defer when EmitAllDecls is specified.
if (LangOpts.EmitAllDecls)
@@ -2490,6 +2691,33 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
return ConstantAddress(Addr, Alignment);
}
+ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
+ const TemplateParamObjectDecl *TPO) {
+ StringRef Name = getMangledName(TPO);
+ CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
+
+ if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
+ return ConstantAddress(GV, Alignment);
+
+ ConstantEmitter Emitter(*this);
+ llvm::Constant *Init = Emitter.emitForInitializer(
+ TPO->getValue(), TPO->getType().getAddressSpace(), TPO->getType());
+
+ if (!Init) {
+ ErrorUnsupported(TPO, "template parameter object");
+ return ConstantAddress::invalid();
+ }
+
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), Init->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
+ if (supportsCOMDAT())
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+ Emitter.finalize(GV);
+
+ return ConstantAddress(GV, Alignment);
+}
+
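
For context, a sketch of the C++20 construct that reaches this path:

    // A class-type non-type template parameter is materialized as a
    // linkonce_odr constant global (with a comdat where supported):
    struct Limits { int lo, hi; };
    template <Limits L> int clampTo(int v) {
      return v < L.lo ? L.lo : v > L.hi ? L.hi : v;
    }
    int use(int v) { return clampTo<Limits{0, 255}>(v); }
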
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
@@ -3789,6 +4017,8 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
if (LangOpts.OpenCL) {
AddrSpace = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
assert(AddrSpace == LangAS::opencl_global ||
+ AddrSpace == LangAS::opencl_global_device ||
+ AddrSpace == LangAS::opencl_global_host ||
AddrSpace == LangAS::opencl_constant ||
AddrSpace == LangAS::opencl_local ||
AddrSpace >= LangAS::FirstTargetAddressSpace);
@@ -3952,13 +4182,14 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Shadows of initialized device-side global variables are also left
// undefined.
bool IsCUDAShadowVar =
- !getLangOpts().CUDAIsDevice &&
+ !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
(D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDASharedAttr>());
bool IsCUDADeviceShadowVar =
getLangOpts().CUDAIsDevice &&
(D->getType()->isCUDADeviceBuiltinSurfaceType() ||
- D->getType()->isCUDADeviceBuiltinTextureType());
+ D->getType()->isCUDADeviceBuiltinTextureType() ||
+ D->hasAttr<HIPManagedAttr>());
// HIP pinned shadows of initialized host-side global variables are also
// left undefined.
if (getLangOpts().CUDA &&
@@ -4075,7 +4306,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Shadow variables and their properties must be registered with CUDA
// runtime. Skip Extern global variables, which will be registered in
// the TU where they are defined.
- if (!D->hasExternalStorage())
+ //
+ // Don't register a C++17 inline variable. The local symbol can be
+ // discarded and referencing a discarded local symbol from outside the
+ // comdat (__cuda_register_globals) is disallowed by the ELF spec.
+ // TODO: Reject __device__ constexpr and __device__ inline in Sema.
+ if (!D->hasExternalStorage() && !D->isInline())
getCUDARuntime().registerDeviceVar(D, *GV, !D->hasDefinition(),
D->hasAttr<CUDAConstantAttr>());
} else if (D->hasAttr<CUDASharedAttr>()) {
@@ -4325,13 +4561,16 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
// and must all be equivalent. However, we are not allowed to
// throw away these explicit instantiations.
//
- // We don't currently support CUDA device code spread out across multiple TUs,
+  // CUDA/HIP: In the -fno-gpu-rdc case, device code is limited to one TU,
// so say that CUDA templates are either external (for kernels) or internal.
- // This lets llvm perform aggressive inter-procedural optimizations.
+  // This lets llvm perform aggressive inter-procedural optimizations. In the
+  // -fgpu-rdc case, device function calls across multiple TUs are allowed, so
+  // we need to follow the normal linkage paradigm.
if (Linkage == GVA_StrongODR) {
- if (Context.getLangOpts().AppleKext)
+ if (getLangOpts().AppleKext)
return llvm::Function::ExternalLinkage;
- if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice)
+ if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
+ !getLangOpts().GPURelocatableDeviceCode)
return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
: llvm::Function::InternalLinkage;
return llvm::Function::WeakODRLinkage;
@@ -4522,9 +4761,11 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
MaybeHandleStaticInExternC(D, Fn);
-
maybeSetTrivialComdat(*D, *Fn);
+ // Set CodeGen attributes that represent floating point environment.
+ setLLVMFunctionFEnvAttributes(D, Fn);
+
CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
setNonAliasAttributes(GD, Fn);
@@ -4533,7 +4774,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
AddGlobalCtor(Fn, CA->getPriority());
if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
- AddGlobalDtor(Fn, DA->getPriority());
+ AddGlobalDtor(Fn, DA->getPriority(), true);
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, Fn);
}
@@ -4572,8 +4813,10 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy),
/*D=*/nullptr);
- LT = getLLVMLinkageVarDefinition(cast<VarDecl>(GD.getDecl()),
- D->getType().isConstQualified());
+ if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
+ LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
+ else
+ LT = getFunctionLinkage(GD);
}
// Create the new alias itself, but don't set a name yet.
@@ -4896,6 +5139,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
switch (Triple.getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
+ case llvm::Triple::GOFF:
+ llvm_unreachable("GOFF is not yet implemented");
case llvm::Triple::XCOFF:
llvm_unreachable("XCOFF is not yet implemented");
case llvm::Triple::COFF:
@@ -5373,16 +5618,21 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
Spec->hasDefinition())
DI->completeTemplateDefinition(*Spec);
} LLVM_FALLTHROUGH;
- case Decl::CXXRecord:
- if (CGDebugInfo *DI = getModuleDebugInfo())
+ case Decl::CXXRecord: {
+ CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
+ if (CGDebugInfo *DI = getModuleDebugInfo()) {
+ if (CRD->hasDefinition())
+ DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
- DI->completeUnusedClass(cast<CXXRecordDecl>(*D));
+ DI->completeUnusedClass(*CRD);
+ }
// Emit any static data members, they may be definitions.
- for (auto *I : cast<CXXRecordDecl>(D)->decls())
+ for (auto *I : CRD->decls())
if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
EmitTopLevelDecl(I);
break;
+ }
// No code generation needed.
case Decl::UsingShadow:
case Decl::ClassTemplate:
@@ -5568,6 +5818,25 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
break;
+ case Decl::Typedef:
+ case Decl::TypeAlias: // using foo = bar; [C++11]
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitAndRetainType(
+ getContext().getTypedefType(cast<TypedefNameDecl>(D)));
+ break;
+
+ case Decl::Record:
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (cast<RecordDecl>(D)->getDefinition())
+ DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
+ break;
+
+ case Decl::Enum:
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (cast<EnumDecl>(D)->getDefinition())
+ DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
+ break;
+
default:
// Make sure we handled everything we should, every other kind is a
// non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
@@ -6006,16 +6275,17 @@ CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
*BaseInfo = LValueBaseInfo(AlignmentSource::Type);
CharUnits Alignment;
- // For C++ class pointees, we don't know whether we're pointing at a
- // base or a complete object, so we generally need to use the
- // non-virtual alignment.
const CXXRecordDecl *RD;
- if (forPointeeType && !AlignForArray && (RD = T->getAsCXXRecordDecl())) {
+ if (T.getQualifiers().hasUnaligned()) {
+ Alignment = CharUnits::One();
+ } else if (forPointeeType && !AlignForArray &&
+ (RD = T->getAsCXXRecordDecl())) {
+ // For C++ class pointees, we don't know whether we're pointing at a
+ // base or a complete object, so we generally need to use the
+ // non-virtual alignment.
Alignment = getClassPointerAlignment(RD);
} else {
Alignment = getContext().getTypeAlignInChars(T);
- if (T.getQualifiers().hasUnaligned())
- Alignment = CharUnits::One();
}
// Cap to the global maximum type alignment unless the alignment
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index a6c4a1f7b278..618e2f857b07 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -396,10 +396,6 @@ private:
/// emitted when the translation unit is complete.
CtorList GlobalDtors;
- /// A unique trailing identifier as a part of sinit/sterm function when
- /// UseSinitAndSterm of CXXABI is set as true.
- std::string GlobalUniqueModuleId;
-
/// An ordered map of canonical GlobalDecls to their mangled names.
llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames;
llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings;
@@ -417,6 +413,9 @@ private:
/// Map used to get unique annotation strings.
llvm::StringMap<llvm::Constant*> AnnotationStrings;
+ /// Used for uniquing of annotation arguments.
+ llvm::DenseMap<unsigned, llvm::Constant *> AnnotationArgs;
+
llvm::StringMap<llvm::GlobalVariable *> CFConstantStringMap;
llvm::DenseMap<llvm::Constant *, llvm::GlobalVariable *> ConstantStringMap;
@@ -607,9 +606,11 @@ public:
return *ObjCData;
}
- // Version checking function, used to implement ObjC's @available:
+ // Version checking functions, used to implement ObjC's @available:
// i32 @__isOSVersionAtLeast(i32, i32, i32)
llvm::FunctionCallee IsOSVersionAtLeastFn = nullptr;
+ // i32 @__isPlatformVersionAtLeast(i32, i32, i32, i32)
+ llvm::FunctionCallee IsPlatformVersionAtLeastFn = nullptr;
InstrProfStats &getPGOStats() { return PGOStats; }
llvm::IndexedInstrProfReader *getPGOReader() const { return PGOReader.get(); }
@@ -819,8 +820,7 @@ public:
llvm::Function *CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *ty, const Twine &name, const CGFunctionInfo &FI,
- SourceLocation Loc = SourceLocation(), bool TLS = false,
- bool IsExternalLinkage = false);
+ SourceLocation Loc = SourceLocation(), bool TLS = false);
/// Return the AST address space of the underlying global variable for D, as
/// determined by its declaration. Normally this is the same as the address
@@ -864,6 +864,10 @@ public:
/// Get the address of a GUID.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD);
+ /// Get the address of a template parameter object.
+ ConstantAddress
+ GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO);
+
/// Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(StringRef Name, llvm::Type *FnTy,
GlobalDecl GD);
@@ -1059,6 +1063,12 @@ public:
DtorFn.getCallee(), nullptr);
}
+ /// Add an sterm finalizer to its own llvm.global_dtors entry.
+ void AddCXXStermFinalizerToGlobalDtor(llvm::Function *StermFinalizer,
+ int Priority) {
+ AddGlobalDtor(StermFinalizer, Priority);
+ }
+
/// Create or return a runtime function declaration with the specified type
/// and name. If \p AssumeConvergent is true, the call will have the
/// convergent attribute added.
@@ -1067,16 +1077,6 @@ public:
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
bool Local = false, bool AssumeConvergent = false);
- /// Create or return a runtime function declaration with the specified type
- /// and name. This will automatically add the convergent attribute to the
- /// function declaration.
- llvm::FunctionCallee CreateConvergentRuntimeFunction(
- llvm::FunctionType *Ty, StringRef Name,
- llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
- bool Local = false) {
- return CreateRuntimeFunction(Ty, Name, ExtraAttrs, Local, true);
- }
-
/// Create a new runtime global variable with the specified type and name.
llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name);
@@ -1136,6 +1136,10 @@ public:
/// definition.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+ /// Set the LLVM function attributes that represent floating point
+ /// environment.
+ void setLLVMFunctionFEnvAttributes(const FunctionDecl *D, llvm::Function *F);
+
/// Return true iff the given type uses 'sret' when used as a return type.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
@@ -1242,6 +1246,9 @@ public:
/// Emit the annotation line number.
llvm::Constant *EmitAnnotationLineNo(SourceLocation L);
+ /// Emit additional args of the annotation.
+ llvm::Constant *EmitAnnotationArgs(const AnnotateAttr *Attr);
+
/// Generate the llvm::ConstantStruct which contains the annotation
/// information for a given GlobalValue. The annotation struct is
/// {i8 *, i8 *, i8 *, i32, i8 *}. The first field is a constant expression, the
@@ -1270,6 +1277,10 @@ public:
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
StringRef Category = StringRef()) const;
+  /// Returns true if the function at the given location should be excluded
+  /// from profile instrumentation.
+ bool isProfileInstrExcluded(llvm::Function *Fn, SourceLocation Loc) const;
+
SanitizerMetadata *getSanitizerMetadata() {
return SanitizerMD.get();
}
@@ -1326,8 +1337,11 @@ public:
/// a virtual function call could be made which ends up being dispatched to a
/// member function of this class. This scope can be wider than the visibility
/// of the class itself when the class has a more-visible dynamic base class.
+ /// The client should pass in an empty Visited set, which is used to prevent
+ /// redundant recursive processing.
llvm::GlobalObject::VCallVisibility
- GetVCallVisibilityLevel(const CXXRecordDecl *RD);
+ GetVCallVisibilityLevel(const CXXRecordDecl *RD,
+ llvm::DenseSet<const CXXRecordDecl *> &Visited);
/// Emit type metadata for the given vtable using the given layout.
void EmitVTableTypeMetadata(const CXXRecordDecl *RD,
@@ -1471,7 +1485,8 @@ private:
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority = 65535,
llvm::Constant *AssociatedData = nullptr);
- void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535,
+ bool IsDtorAttrFunc = false);
/// EmitCtorList - Generates a global array of functions and priorities using
/// the given list and name. This array will have appending linkage and is
@@ -1501,6 +1516,11 @@ private:
/// __cxa_atexit, if it is available, or atexit otherwise.
void registerGlobalDtorsWithAtExit();
+  // When using sinit and sterm functions, unregister
+  // __attribute__((destructor))-annotated functions that were previously
+  // registered with atexit, by calling unatexit.
+ void unregisterGlobalDtorsWithUnAtExit();
+
void emitMultiVersionFunctions();
/// Emit any vtables which we deferred and still have a use for.
diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp
index e810f608ab78..08ae87785065 100644
--- a/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -160,10 +160,13 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
PGOHash Hash;
/// The map of statements to counters.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+ /// The profile version.
+ uint64_t ProfileVersion;
- MapRegionCounters(PGOHashVersion HashVersion,
+ MapRegionCounters(PGOHashVersion HashVersion, uint64_t ProfileVersion,
llvm::DenseMap<const Stmt *, unsigned> &CounterMap)
- : NextCounter(0), Hash(HashVersion), CounterMap(CounterMap) {}
+ : NextCounter(0), Hash(HashVersion), CounterMap(CounterMap),
+ ProfileVersion(ProfileVersion) {}
// Blocks and lambdas are handled as separate functions, so we need not
// traverse them in the parent context.
@@ -203,6 +206,18 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return Type;
}
+ /// The RHS of all logical operators gets a fresh counter in order to count
+ /// how many times the RHS evaluates to true or false, depending on the
+  /// semantics of the operator. This is only done for profile version >= v7,
+  /// to preserve backward compatibility.
+ bool VisitBinaryOperator(BinaryOperator *S) {
+ if (ProfileVersion >= llvm::IndexedInstrProf::Version7)
+ if (S->isLogicalOp() &&
+ CodeGenFunction::isInstrumentedCondition(S->getRHS()))
+ CounterMap[S->getRHS()] = NextCounter++;
+ return Base::VisitBinaryOperator(S);
+ }
+
/// Include \p S in the function hash.
bool VisitStmt(Stmt *S) {
auto Type = updateCounterMappings(S);
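
A hedged illustration of the counter placement for branch-condition coverage
(counter numbering is illustrative):

    // With profile version >= 7, the RHS of '&&' and '||' gets its own fresh
    // counter, so coverage can report how often 'b' was evaluated and true:
    bool admit(bool a, bool b) {
      return a && b;   // one counter on the expression, another on 'b'
    }
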
@@ -773,6 +788,11 @@ void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
if (!D->hasBody())
return;
+ // Skip CUDA/HIP kernel launch stub functions.
+ if (CGM.getLangOpts().CUDA && !CGM.getLangOpts().CUDAIsDevice &&
+ D->hasAttr<CUDAGlobalAttr>())
+ return;
+
bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
llvm::IndexedInstrProfReader *PGOReader = CGM.getPGOReader();
if (!InstrumentRegions && !PGOReader)
@@ -791,6 +811,9 @@ void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
if (isa<CXXDestructorDecl>(D) && GD.getDtorType() != Dtor_Base)
return;
+ if (Fn->hasFnAttribute(llvm::Attribute::NoProfile))
+ return;
+
CGM.ClearUnusedCoverageMapping(D);
setFuncName(Fn);
@@ -809,11 +832,14 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
// Use the latest hash version when inserting instrumentation, but use the
// version in the indexed profile if we're reading PGO data.
PGOHashVersion HashVersion = PGO_HASH_LATEST;
- if (auto *PGOReader = CGM.getPGOReader())
+ uint64_t ProfileVersion = llvm::IndexedInstrProf::Version;
+ if (auto *PGOReader = CGM.getPGOReader()) {
HashVersion = getPGOHashVersion(PGOReader, CGM);
+ ProfileVersion = PGOReader->getVersion();
+ }
RegionCounterMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
- MapRegionCounters Walker(HashVersion, *RegionCounterMap);
+ MapRegionCounters Walker(HashVersion, ProfileVersion, *RegionCounterMap);
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
Walker.TraverseDecl(const_cast<FunctionDecl *>(FD));
else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
@@ -831,6 +857,18 @@ bool CodeGenPGO::skipRegionMappingForDecl(const Decl *D) {
if (!D->getBody())
return true;
+  // Skip host-only functions in the CUDA device compilation and device-only
+  // functions in the host compilation. This is a rough, attribute-based
+  // filter; coverage mappings may still be generated for functions that are
+  // effectively host-only or device-only.
+ if (CGM.getLangOpts().CUDA &&
+ ((CGM.getLangOpts().CUDAIsDevice && !D->hasAttr<CUDADeviceAttr>() &&
+ !D->hasAttr<CUDAGlobalAttr>()) ||
+ (!CGM.getLangOpts().CUDAIsDevice &&
+ (D->hasAttr<CUDAGlobalAttr>() ||
+ (!D->hasAttr<CUDAHostAttr>() && D->hasAttr<CUDADeviceAttr>())))))
+ return true;
+
// Don't map the functions in system headers.
const auto &SM = CGM.getContext().getSourceManager();
auto Loc = D->getBody()->getBeginLoc();
@@ -1021,7 +1059,7 @@ static uint32_t scaleBranchWeight(uint64_t Weight, uint64_t Scale) {
}
llvm::MDNode *CodeGenFunction::createProfileWeights(uint64_t TrueCount,
- uint64_t FalseCount) {
+ uint64_t FalseCount) const {
// Check for empty weights.
if (!TrueCount && !FalseCount)
return nullptr;
@@ -1035,7 +1073,7 @@ llvm::MDNode *CodeGenFunction::createProfileWeights(uint64_t TrueCount,
}
llvm::MDNode *
-CodeGenFunction::createProfileWeights(ArrayRef<uint64_t> Weights) {
+CodeGenFunction::createProfileWeights(ArrayRef<uint64_t> Weights) const {
// We need at least two elements to create meaningful weights.
if (Weights.size() < 2)
return nullptr;
@@ -1057,8 +1095,9 @@ CodeGenFunction::createProfileWeights(ArrayRef<uint64_t> Weights) {
return MDHelper.createBranchWeights(ScaledWeights);
}
-llvm::MDNode *CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond,
- uint64_t LoopCount) {
+llvm::MDNode *
+CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond,
+ uint64_t LoopCount) const {
if (!PGO.haveRegionCounts())
return nullptr;
Optional<uint64_t> CondCount = PGO.getStmtCount(Cond);
diff --git a/clang/lib/CodeGen/CodeGenPGO.h b/clang/lib/CodeGen/CodeGenPGO.h
index dda8c66b6db2..906c5e406d77 100644
--- a/clang/lib/CodeGen/CodeGenPGO.h
+++ b/clang/lib/CodeGen/CodeGenPGO.h
@@ -59,7 +59,7 @@ public:
/// Check if an execution count is known for a given statement. If so, return
/// true and put the value in Count; else return false.
- Optional<uint64_t> getStmtCount(const Stmt *S) {
+ Optional<uint64_t> getStmtCount(const Stmt *S) const {
if (!StmtCountMap)
return None;
auto I = StmtCountMap->find(S);
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index 20a3263c0b1a..f258234fb4d8 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -41,6 +41,9 @@ struct CodeGenTypeCache {
/// int
llvm::IntegerType *IntTy;
+ /// char
+ llvm::IntegerType *CharTy;
+
/// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
union {
llvm::IntegerType *IntPtrTy;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index d431c0263666..7537ac12f1c8 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -52,20 +52,26 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
llvm::raw_svector_ostream OS(TypeName);
OS << RD->getKindName() << '.';
+ // FIXME: We probably want to make more tweaks to the printing policy. For
+ // example, we should probably enable PrintCanonicalTypes and
+ // FullyQualifiedNames.
+ PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
+ Policy.SuppressInlineNamespace = false;
+
// Name the codegen type after the typedef name
// if there is no tag type name available
if (RD->getIdentifier()) {
// FIXME: We should not have to check for a null decl context here.
// Right now we do it because the implicit Obj-C decls don't have one.
if (RD->getDeclContext())
- RD->printQualifiedName(OS);
+ RD->printQualifiedName(OS, Policy);
else
RD->printName(OS);
} else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
// FIXME: We should not have to check for a null decl context here.
// Right now we do it because the implicit Obj-C decls don't have one.
if (TDD->getDeclContext())
- TDD->printQualifiedName(OS);
+ TDD->printQualifiedName(OS, Policy);
else
TDD->printName(OS);
} else
@@ -93,7 +99,8 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
// If this is a bool type, or an ExtIntType in a bitfield representation,
// map this integer to the target-specified size.
- if ((ForBitField && T->isExtIntType()) || R->isIntegerTy(1))
+ if ((ForBitField && T->isExtIntType()) ||
+ (!T->isExtIntType() && R->isIntegerTy(1)))
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
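
A sketch of the distinction being drawn (the lowering noted in comments is the
expected one, not verified output):

    struct S {
      bool flag;                 // i1 as a value, widened (typically to i8)
                                 // in memory
      unsigned _ExtInt(1) tiny;  // kept as i1 in memory; widened only when
                                 // used as a bit-field
    };
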
@@ -533,99 +540,67 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::OCLReserveID:
ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
break;
-#define GET_SVE_INT_VEC(BITS, ELTS) \
- llvm::ScalableVectorType::get( \
- llvm::IntegerType::get(getLLVMContext(), BITS), ELTS);
case BuiltinType::SveInt8:
case BuiltinType::SveUint8:
- return GET_SVE_INT_VEC(8, 16);
case BuiltinType::SveInt8x2:
case BuiltinType::SveUint8x2:
- return GET_SVE_INT_VEC(8, 32);
case BuiltinType::SveInt8x3:
case BuiltinType::SveUint8x3:
- return GET_SVE_INT_VEC(8, 48);
case BuiltinType::SveInt8x4:
case BuiltinType::SveUint8x4:
- return GET_SVE_INT_VEC(8, 64);
case BuiltinType::SveInt16:
case BuiltinType::SveUint16:
- return GET_SVE_INT_VEC(16, 8);
case BuiltinType::SveInt16x2:
case BuiltinType::SveUint16x2:
- return GET_SVE_INT_VEC(16, 16);
case BuiltinType::SveInt16x3:
case BuiltinType::SveUint16x3:
- return GET_SVE_INT_VEC(16, 24);
case BuiltinType::SveInt16x4:
case BuiltinType::SveUint16x4:
- return GET_SVE_INT_VEC(16, 32);
case BuiltinType::SveInt32:
case BuiltinType::SveUint32:
- return GET_SVE_INT_VEC(32, 4);
case BuiltinType::SveInt32x2:
case BuiltinType::SveUint32x2:
- return GET_SVE_INT_VEC(32, 8);
case BuiltinType::SveInt32x3:
case BuiltinType::SveUint32x3:
- return GET_SVE_INT_VEC(32, 12);
case BuiltinType::SveInt32x4:
case BuiltinType::SveUint32x4:
- return GET_SVE_INT_VEC(32, 16);
case BuiltinType::SveInt64:
case BuiltinType::SveUint64:
- return GET_SVE_INT_VEC(64, 2);
case BuiltinType::SveInt64x2:
case BuiltinType::SveUint64x2:
- return GET_SVE_INT_VEC(64, 4);
case BuiltinType::SveInt64x3:
case BuiltinType::SveUint64x3:
- return GET_SVE_INT_VEC(64, 6);
case BuiltinType::SveInt64x4:
case BuiltinType::SveUint64x4:
- return GET_SVE_INT_VEC(64, 8);
case BuiltinType::SveBool:
- return GET_SVE_INT_VEC(1, 16);
-#undef GET_SVE_INT_VEC
-#define GET_SVE_FP_VEC(TY, ISFP16, ELTS) \
- llvm::ScalableVectorType::get( \
- getTypeForFormat(getLLVMContext(), \
- Context.getFloatTypeSemantics(Context.TY), \
- /* UseNativeHalf = */ ISFP16), \
- ELTS);
case BuiltinType::SveFloat16:
- return GET_SVE_FP_VEC(HalfTy, true, 8);
case BuiltinType::SveFloat16x2:
- return GET_SVE_FP_VEC(HalfTy, true, 16);
case BuiltinType::SveFloat16x3:
- return GET_SVE_FP_VEC(HalfTy, true, 24);
case BuiltinType::SveFloat16x4:
- return GET_SVE_FP_VEC(HalfTy, true, 32);
case BuiltinType::SveFloat32:
- return GET_SVE_FP_VEC(FloatTy, false, 4);
case BuiltinType::SveFloat32x2:
- return GET_SVE_FP_VEC(FloatTy, false, 8);
case BuiltinType::SveFloat32x3:
- return GET_SVE_FP_VEC(FloatTy, false, 12);
case BuiltinType::SveFloat32x4:
- return GET_SVE_FP_VEC(FloatTy, false, 16);
case BuiltinType::SveFloat64:
- return GET_SVE_FP_VEC(DoubleTy, false, 2);
case BuiltinType::SveFloat64x2:
- return GET_SVE_FP_VEC(DoubleTy, false, 4);
case BuiltinType::SveFloat64x3:
- return GET_SVE_FP_VEC(DoubleTy, false, 6);
case BuiltinType::SveFloat64x4:
- return GET_SVE_FP_VEC(DoubleTy, false, 8);
case BuiltinType::SveBFloat16:
- return GET_SVE_FP_VEC(BFloat16Ty, false, 8);
case BuiltinType::SveBFloat16x2:
- return GET_SVE_FP_VEC(BFloat16Ty, false, 16);
case BuiltinType::SveBFloat16x3:
- return GET_SVE_FP_VEC(BFloat16Ty, false, 24);
- case BuiltinType::SveBFloat16x4:
- return GET_SVE_FP_VEC(BFloat16Ty, false, 32);
-#undef GET_SVE_FP_VEC
+ case BuiltinType::SveBFloat16x4: {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
+ return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
+ Info.EC.getKnownMinValue() *
+ Info.NumVectors);
+ }
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id: \
+ ResultType = \
+ llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
+ break;
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
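A rough walk-through of the consolidated SVE lowering, using values the getBuiltinVectorTypeInfo query already encodes: for SveInt32x2, Info.ElementType is a 32-bit integer, Info.EC.getKnownMinValue() is 4, and Info.NumVectors is 2, so the result is llvm::ScalableVectorType::get(i32, 8), the same <vscale x 8 x i32> the deleted GET_SVE_INT_VEC(32, 8) macro produced. The new PPC_VECTOR_TYPE cases expand from PPCTypes.def into fixed i1 vectors of the given bit width, since ConvertType(Context.BoolTy) yields i1.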
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 78b268f423cb..5c25c204cc0b 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -31,20 +31,73 @@
// is textually included.
#define COVMAP_V3
+static llvm::cl::opt<bool> EmptyLineCommentCoverage(
+ "emptyline-comment-coverage",
+ llvm::cl::desc("Emit emptylines and comment lines as skipped regions (only "
+ "disable it on test)"),
+ llvm::cl::init(true), llvm::cl::Hidden);
+
using namespace clang;
using namespace CodeGen;
using namespace llvm::coverage;
+CoverageSourceInfo *
+CoverageMappingModuleGen::setUpCoverageCallbacks(Preprocessor &PP) {
+ CoverageSourceInfo *CoverageInfo =
+ new CoverageSourceInfo(PP.getSourceManager());
+ PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(CoverageInfo));
+ if (EmptyLineCommentCoverage) {
+ PP.addCommentHandler(CoverageInfo);
+ PP.setEmptylineHandler(CoverageInfo);
+ PP.setPreprocessToken(true);
+ PP.setTokenWatcher([CoverageInfo](clang::Token Tok) {
+ // Update previous token location.
+ CoverageInfo->PrevTokLoc = Tok.getLocation();
+ if (Tok.getKind() != clang::tok::eod)
+ CoverageInfo->updateNextTokLoc(Tok.getLocation());
+ });
+ }
+ return CoverageInfo;
+}
+
+void CoverageSourceInfo::AddSkippedRange(SourceRange Range) {
+ if (EmptyLineCommentCoverage && !SkippedRanges.empty() &&
+ PrevTokLoc == SkippedRanges.back().PrevTokLoc &&
+ SourceMgr.isWrittenInSameFile(SkippedRanges.back().Range.getEnd(),
+ Range.getBegin()))
+ SkippedRanges.back().Range.setEnd(Range.getEnd());
+ else
+ SkippedRanges.push_back({Range, PrevTokLoc});
+}
+
void CoverageSourceInfo::SourceRangeSkipped(SourceRange Range, SourceLocation) {
- SkippedRanges.push_back(Range);
+ AddSkippedRange(Range);
+}
+
+void CoverageSourceInfo::HandleEmptyline(SourceRange Range) {
+ AddSkippedRange(Range);
+}
+
+bool CoverageSourceInfo::HandleComment(Preprocessor &PP, SourceRange Range) {
+ AddSkippedRange(Range);
+ return false;
+}
+
+void CoverageSourceInfo::updateNextTokLoc(SourceLocation Loc) {
+ if (!SkippedRanges.empty() && SkippedRanges.back().NextTokLoc.isInvalid())
+ SkippedRanges.back().NextTokLoc = Loc;
}
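A worked example of the merging (source hypothetical):

  int x = f();   // the token watcher leaves PrevTokLoc at the ';'
  // comment A
  // comment B

  int y;         // lexing 'int' fills NextTokLoc via updateNextTokLoc

Comments A and B and the blank line arrive as three separate ranges, but no token is lexed in between, so they share the same PrevTokLoc and AddSkippedRange coalesces them into one SkippedRange whose end keeps extending.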
namespace {
/// A region of source code that can be mapped to a counter.
class SourceMappingRegion {
+ /// Primary Counter that is also used for Branch Regions for "True" branches.
Counter Count;
+ /// Secondary Counter used for Branch Regions for "False" branches.
+ Optional<Counter> FalseCount;
+
/// The region's starting location.
Optional<SourceLocation> LocStart;
@@ -65,8 +118,20 @@ public:
: Count(Count), LocStart(LocStart), LocEnd(LocEnd),
DeferRegion(DeferRegion), GapRegion(GapRegion) {}
+ SourceMappingRegion(Counter Count, Optional<Counter> FalseCount,
+ Optional<SourceLocation> LocStart,
+ Optional<SourceLocation> LocEnd, bool DeferRegion = false,
+ bool GapRegion = false)
+ : Count(Count), FalseCount(FalseCount), LocStart(LocStart),
+ LocEnd(LocEnd), DeferRegion(DeferRegion), GapRegion(GapRegion) {}
+
const Counter &getCounter() const { return Count; }
+ const Counter &getFalseCounter() const {
+ assert(FalseCount && "Region has no alternate counter");
+ return *FalseCount;
+ }
+
void setCounter(Counter C) { Count = C; }
bool hasStartLoc() const { return LocStart.hasValue(); }
@@ -97,6 +162,8 @@ public:
bool isGap() const { return GapRegion; }
void setGap(bool Gap) { GapRegion = Gap; }
+
+ bool isBranch() const { return FalseCount.hasValue(); }
};
/// Spelling locations for the start and end of a source region.
@@ -274,8 +341,31 @@ public:
return None;
}
+ /// This shrinks the skipped range if it spans a line that contains a
+ /// non-comment token. If shrinking the skipped range would make it empty,
+ /// this returns None.
+ Optional<SpellingRegion> adjustSkippedRange(SourceManager &SM,
+ SourceLocation LocStart,
+ SourceLocation LocEnd,
+ SourceLocation PrevTokLoc,
+ SourceLocation NextTokLoc) {
+ SpellingRegion SR{SM, LocStart, LocEnd};
+ SR.ColumnStart = 1;
+ if (PrevTokLoc.isValid() && SM.isWrittenInSameFile(LocStart, PrevTokLoc) &&
+ SR.LineStart == SM.getSpellingLineNumber(PrevTokLoc))
+ SR.LineStart++;
+ if (NextTokLoc.isValid() && SM.isWrittenInSameFile(LocEnd, NextTokLoc) &&
+ SR.LineEnd == SM.getSpellingLineNumber(NextTokLoc)) {
+ SR.LineEnd--;
+ SR.ColumnEnd++;
+ }
+ if (SR.isInSourceOrder())
+ return SR;
+ return None;
+ }
+
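To make the shrinking concrete (line numbers hypothetical): for a skipped range covering lines 10-14 where the previous token also ends on line 10 and the next token starts on line 14, LineStart becomes 11 and LineEnd becomes 13, so only the lines holding no real tokens are reported as skipped. If the adjustment inverts the range, say because the previous and next tokens sit on the same line, isInSourceOrder() fails and None is returned.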
/// Gather all the regions that were skipped by the preprocessor
- /// using the constructs like #if.
+ /// using constructs like #if or comments.
void gatherSkippedRegions() {
/// An array of the minimum lineStarts and the maximum lineEnds
/// for mapping regions from the appropriate source files.
@@ -291,18 +381,23 @@ public:
}
auto SkippedRanges = CVM.getSourceInfo().getSkippedRanges();
- for (const auto &I : SkippedRanges) {
- auto LocStart = I.getBegin();
- auto LocEnd = I.getEnd();
+ for (auto &I : SkippedRanges) {
+ SourceRange Range = I.Range;
+ auto LocStart = Range.getBegin();
+ auto LocEnd = Range.getEnd();
assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
"region spans multiple files");
auto CovFileID = getCoverageFileID(LocStart);
if (!CovFileID)
continue;
- SpellingRegion SR{SM, LocStart, LocEnd};
+ Optional<SpellingRegion> SR =
+ adjustSkippedRange(SM, LocStart, LocEnd, I.PrevTokLoc, I.NextTokLoc);
+ if (!SR.hasValue())
+ continue;
auto Region = CounterMappingRegion::makeSkipped(
- *CovFileID, SR.LineStart, SR.ColumnStart, SR.LineEnd, SR.ColumnEnd);
+ *CovFileID, SR->LineStart, SR->ColumnStart, SR->LineEnd,
+ SR->ColumnEnd);
// Make sure that we only collect the regions that are inside
// the source code of this function.
if (Region.LineStart >= FileLineRanges[*CovFileID].first &&
@@ -348,6 +443,10 @@ public:
MappingRegions.push_back(CounterMappingRegion::makeGapRegion(
Region.getCounter(), *CovFileID, SR.LineStart, SR.ColumnStart,
SR.LineEnd, SR.ColumnEnd));
+ } else if (Region.isBranch()) {
+ MappingRegions.push_back(CounterMappingRegion::makeBranchRegion(
+ Region.getCounter(), Region.getFalseCounter(), *CovFileID,
+ SR.LineStart, SR.ColumnStart, SR.LineEnd, SR.ColumnEnd));
} else {
MappingRegions.push_back(CounterMappingRegion::makeRegion(
Region.getCounter(), *CovFileID, SR.LineStart, SR.ColumnStart,
@@ -486,12 +585,16 @@ struct CounterCoverageMappingBuilder
/// Returns the index on the stack where the region was pushed. This can be
/// used with popRegions to exit a "scope", ending the region that was pushed.
size_t pushRegion(Counter Count, Optional<SourceLocation> StartLoc = None,
- Optional<SourceLocation> EndLoc = None) {
- if (StartLoc) {
+ Optional<SourceLocation> EndLoc = None,
+ Optional<Counter> FalseCount = None) {
+
+ if (StartLoc && !FalseCount.hasValue()) {
MostRecentLocation = *StartLoc;
completeDeferred(Count, MostRecentLocation);
}
- RegionStack.emplace_back(Count, StartLoc, EndLoc);
+
+ RegionStack.emplace_back(Count, FalseCount, StartLoc, EndLoc,
+ FalseCount.hasValue());
return RegionStack.size() - 1;
}
@@ -581,49 +684,64 @@ struct CounterCoverageMappingBuilder
SourceLocation EndLoc = Region.hasEndLoc()
? Region.getEndLoc()
: RegionStack[ParentIndex].getEndLoc();
+ bool isBranch = Region.isBranch();
size_t StartDepth = locationDepth(StartLoc);
size_t EndDepth = locationDepth(EndLoc);
while (!SM.isWrittenInSameFile(StartLoc, EndLoc)) {
bool UnnestStart = StartDepth >= EndDepth;
bool UnnestEnd = EndDepth >= StartDepth;
if (UnnestEnd) {
- // The region ends in a nested file or macro expansion. Create a
- // separate region for each expansion.
+ // The region ends in a nested file or macro expansion. If the
+ // region is not a branch region, create a separate region for each
+ // expansion, and for all regions, update the EndLoc. Branch
+ // regions should not be split in order to keep a straightforward
+ // correspondence between the region and its associated branch
+ // condition, even if the condition spans multiple depths.
SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
- if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
- SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
+ if (!isBranch && !isRegionAlreadyAdded(NestedLoc, EndLoc))
+ SourceRegions.emplace_back(Region.getCounter(), NestedLoc,
+ EndLoc);
EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
if (EndLoc.isInvalid())
- llvm::report_fatal_error("File exit not handled before popRegions");
+ llvm::report_fatal_error(
+ "File exit not handled before popRegions");
EndDepth--;
}
if (UnnestStart) {
- // The region begins in a nested file or macro expansion. Create a
- // separate region for each expansion.
+ // The region begins in a nested file or macro expansion. If the
+ // region is not a branch region, create a separate region for each
+ // expansion, and for all regions, update the StartLoc. Branch
+ // regions should not be split in order to keep a straightforward
+ // correspondence between the region and its associated branch
+ // condition, even if the condition spans multiple depths.
SourceLocation NestedLoc = getEndOfFileOrMacro(StartLoc);
assert(SM.isWrittenInSameFile(StartLoc, NestedLoc));
- if (!isRegionAlreadyAdded(StartLoc, NestedLoc))
- SourceRegions.emplace_back(Region.getCounter(), StartLoc, NestedLoc);
+ if (!isBranch && !isRegionAlreadyAdded(StartLoc, NestedLoc))
+ SourceRegions.emplace_back(Region.getCounter(), StartLoc,
+ NestedLoc);
StartLoc = getIncludeOrExpansionLoc(StartLoc);
if (StartLoc.isInvalid())
- llvm::report_fatal_error("File exit not handled before popRegions");
+ llvm::report_fatal_error(
+ "File exit not handled before popRegions");
StartDepth--;
}
}
Region.setStartLoc(StartLoc);
Region.setEndLoc(EndLoc);
- MostRecentLocation = EndLoc;
- // If this region happens to span an entire expansion, we need to make
- // sure we don't overlap the parent region with it.
- if (StartLoc == getStartOfFileOrMacro(StartLoc) &&
- EndLoc == getEndOfFileOrMacro(EndLoc))
- MostRecentLocation = getIncludeOrExpansionLoc(EndLoc);
+ if (!isBranch) {
+ MostRecentLocation = EndLoc;
+ // If this region happens to span an entire expansion, we need to
+ // make sure we don't overlap the parent region with it.
+ if (StartLoc == getStartOfFileOrMacro(StartLoc) &&
+ EndLoc == getEndOfFileOrMacro(EndLoc))
+ MostRecentLocation = getIncludeOrExpansionLoc(EndLoc);
+ }
assert(SM.isWrittenInSameFile(Region.getBeginLoc(), EndLoc));
assert(SpellingRegion(SM, Region).isInSourceOrder());
@@ -682,14 +800,61 @@ struct CounterCoverageMappingBuilder
return ExitCount;
}
+ /// Determine whether the given condition can be constant folded.
+ bool ConditionFoldsToBool(const Expr *Cond) {
+ Expr::EvalResult Result;
+ return (Cond->EvaluateAsInt(Result, CVM.getCodeGenModule().getContext()));
+ }
+
+ /// Create a Branch Region around an instrumentable condition for coverage
+ /// and add it to the function's SourceRegions. A branch region tracks a
+ /// "True" counter and a "False" counter for boolean expressions that
+ /// result in the generation of a branch.
+ void createBranchRegion(const Expr *C, Counter TrueCnt, Counter FalseCnt) {
+ // Check for NULL conditions.
+ if (!C)
+ return;
+
+ // Ensure this is an instrumentable condition (i.e. no "&&" or "||"). Push
+ // the region onto RegionStack but immediately pop it (which adds it to the
+ // function's SourceRegions) because it doesn't apply to any source code
+ // other than the Condition.
+ if (CodeGenFunction::isInstrumentedCondition(C)) {
+ // If a condition can fold to true or false, the corresponding branch
+ // will be removed. Create a region with both counters hard-coded to
+ // zero. This allows us to visualize them in a special way.
+ // Alternatively, we can prevent any optimization done via
+ // constant-folding by ensuring that ConstantFoldsToSimpleInteger() in
+ // CodeGenFunction.cpp always returns false, but that is very heavy-handed.
+ if (ConditionFoldsToBool(C))
+ popRegions(pushRegion(Counter::getZero(), getStart(C), getEnd(C),
+ Counter::getZero()));
+ else
+ // Otherwise, create a region with the True counter and False counter.
+ popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt));
+ }
+ }
+
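Illustrative inputs, not from the patch: for if (x > 0) with non-constant x, the condition is instrumentable and EvaluateAsInt fails, so a (TrueCnt, FalseCnt) branch region is emitted; for if (1), the condition folds and both counters are emitted as Counter::getZero(), letting tooling render the branch as constant-folded; for if (a && b), the && itself is rejected by isInstrumentedCondition and its operands are handled in VisitBinLAnd instead.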
+ /// Create a Branch Region around a SwitchCase for code coverage
+ /// and add it to the function's SourceRegions.
+ void createSwitchCaseRegion(const SwitchCase *SC, Counter TrueCnt,
+ Counter FalseCnt) {
+ // Push the region onto RegionStack but immediately pop it (which adds it
+ // to the function's SourceRegions) because it doesn't apply to any source
+ // code other than the SwitchCase.
+ popRegions(pushRegion(TrueCnt, getStart(SC), SC->getColonLoc(), FalseCnt));
+ }
+
/// Check whether a region with bounds \c StartLoc and \c EndLoc
/// is already added to \c SourceRegions.
- bool isRegionAlreadyAdded(SourceLocation StartLoc, SourceLocation EndLoc) {
+ bool isRegionAlreadyAdded(SourceLocation StartLoc, SourceLocation EndLoc,
+ bool isBranch = false) {
return SourceRegions.rend() !=
std::find_if(SourceRegions.rbegin(), SourceRegions.rend(),
[&](const SourceMappingRegion &Region) {
return Region.getBeginLoc() == StartLoc &&
- Region.getEndLoc() == EndLoc;
+ Region.getEndLoc() == EndLoc &&
+ Region.isBranch() == isBranch;
});
}
@@ -706,7 +871,7 @@ struct CounterCoverageMappingBuilder
if (getRegion().hasEndLoc() &&
MostRecentLocation == getEndOfFileOrMacro(MostRecentLocation) &&
isRegionAlreadyAdded(getStartOfFileOrMacro(MostRecentLocation),
- MostRecentLocation))
+ MostRecentLocation, getRegion().isBranch()))
MostRecentLocation = getIncludeOrExpansionLoc(MostRecentLocation);
}
@@ -750,9 +915,14 @@ struct CounterCoverageMappingBuilder
// The most nested region for each start location is the one with the
// correct count. We avoid creating redundant regions by stopping once
// we've seen this region.
- if (StartLocs.insert(Loc).second)
- SourceRegions.emplace_back(I.getCounter(), Loc,
- getEndOfFileOrMacro(Loc));
+ if (StartLocs.insert(Loc).second) {
+ if (I.isBranch())
+ SourceRegions.emplace_back(I.getCounter(), I.getFalseCounter(), Loc,
+ getEndOfFileOrMacro(Loc), I.isBranch());
+ else
+ SourceRegions.emplace_back(I.getCounter(), Loc,
+ getEndOfFileOrMacro(Loc));
+ }
Loc = getIncludeOrExpansionLoc(Loc);
}
I.setStartLoc(getPreciseTokenLocEnd(Loc));
@@ -993,6 +1163,10 @@ struct CounterCoverageMappingBuilder
addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
if (OutCount != ParentCount)
pushRegion(OutCount);
+
+ // Create Branch Region around condition.
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(CondCount, BodyCount));
}
void VisitDoStmt(const DoStmt *S) {
@@ -1014,6 +1188,10 @@ struct CounterCoverageMappingBuilder
addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
if (OutCount != ParentCount)
pushRegion(OutCount);
+
+ // Create Branch Region around condition.
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(CondCount, BodyCount));
}
void VisitForStmt(const ForStmt *S) {
@@ -1061,6 +1239,10 @@ struct CounterCoverageMappingBuilder
subtractCounters(CondCount, BodyCount));
if (OutCount != ParentCount)
pushRegion(OutCount);
+
+ // Create Branch Region around condition.
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(CondCount, BodyCount));
}
void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
@@ -1090,6 +1272,10 @@ struct CounterCoverageMappingBuilder
addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount));
if (OutCount != ParentCount)
pushRegion(OutCount);
+
+ // Create Branch Region around condition.
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(LoopCount, BodyCount));
}
void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
@@ -1154,6 +1340,7 @@ struct CounterCoverageMappingBuilder
BreakContinueStack.back().ContinueCount = addCounters(
BreakContinueStack.back().ContinueCount, BC.ContinueCount);
+ Counter ParentCount = getRegion().getCounter();
Counter ExitCount = getRegionCounter(S);
SourceLocation ExitLoc = getEnd(S);
pushRegion(ExitCount);
@@ -1162,6 +1349,28 @@ struct CounterCoverageMappingBuilder
// in a different file.
MostRecentLocation = getStart(S);
handleFileExit(ExitLoc);
+
+ // Create a Branch Region around each Case. Subtract the case's
+ // counter from the Parent counter to track the "False" branch count.
+ Counter CaseCountSum;
+ bool HasDefaultCase = false;
+ const SwitchCase *Case = S->getSwitchCaseList();
+ for (; Case; Case = Case->getNextSwitchCase()) {
+ HasDefaultCase = HasDefaultCase || isa<DefaultStmt>(Case);
+ CaseCountSum = addCounters(CaseCountSum, getRegionCounter(Case));
+ createSwitchCaseRegion(
+ Case, getRegionCounter(Case),
+ subtractCounters(ParentCount, getRegionCounter(Case)));
+ }
+
+ // If no explicit default case exists, create a branch region to represent
+ // the hidden branch, which CodeGen will add later. This region will be
+ // associated with the switch statement's condition.
+ if (!HasDefaultCase) {
+ Counter DefaultTrue = subtractCounters(ParentCount, CaseCountSum);
+ Counter DefaultFalse = subtractCounters(ParentCount, DefaultTrue);
+ createBranchRegion(S->getCond(), DefaultTrue, DefaultFalse);
+ }
}
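Worked counter arithmetic under assumed counts: if the switch is reached 10 times (ParentCount) and its three written cases execute 3, 4, and 2 times with no default present, CaseCountSum is 9, so the hidden default branch gets DefaultTrue = 10 - 9 = 1 and DefaultFalse = 10 - 1 = 9, and that (1, 9) region is attached to the switch condition.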
void VisitSwitchCase(const SwitchCase *S) {
@@ -1222,6 +1431,10 @@ struct CounterCoverageMappingBuilder
if (OutCount != ParentCount)
pushRegion(OutCount);
+
+ // Create Branch Region around condition.
+ createBranchRegion(S->getCond(), ThenCount,
+ subtractCounters(ParentCount, ThenCount));
}
void VisitCXXTryStmt(const CXXTryStmt *S) {
@@ -1265,6 +1478,10 @@ struct CounterCoverageMappingBuilder
extendRegion(E->getFalseExpr());
propagateCounts(subtractCounters(ParentCount, TrueCount),
E->getFalseExpr());
+
+ // Create Branch Region around condition.
+ createBranchRegion(E->getCond(), TrueCount,
+ subtractCounters(ParentCount, TrueCount));
}
void VisitBinLAnd(const BinaryOperator *E) {
@@ -1272,8 +1489,26 @@ struct CounterCoverageMappingBuilder
propagateCounts(getRegion().getCounter(), E->getLHS());
handleFileExit(getEnd(E->getLHS()));
+ // Counter tracks the right hand side of a logical and operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
+
+ // Extract the RHS's Execution Counter.
+ Counter RHSExecCnt = getRegionCounter(E);
+
+ // Extract the RHS's "True" Instance Counter.
+ Counter RHSTrueCnt = getRegionCounter(E->getRHS());
+
+ // Extract the Parent Region Counter.
+ Counter ParentCnt = getRegion().getCounter();
+
+ // Create Branch Region around LHS condition.
+ createBranchRegion(E->getLHS(), RHSExecCnt,
+ subtractCounters(ParentCnt, RHSExecCnt));
+
+ // Create Branch Region around RHS condition.
+ createBranchRegion(E->getRHS(), RHSTrueCnt,
+ subtractCounters(RHSExecCnt, RHSTrueCnt));
}
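Assumed counts to make the && arithmetic concrete: if the whole expression executes 10 times (ParentCnt), the RHS is reached 7 times (RHSExecCnt) and is true 5 times (RHSTrueCnt), then the LHS branch region is (7, 10 - 7 = 3) and the RHS branch region is (5, 7 - 5 = 2).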
void VisitBinLOr(const BinaryOperator *E) {
@@ -1281,8 +1516,26 @@ struct CounterCoverageMappingBuilder
propagateCounts(getRegion().getCounter(), E->getLHS());
handleFileExit(getEnd(E->getLHS()));
+ // Counter tracks the right hand side of a logical or operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
+
+ // Extract the RHS's Execution Counter.
+ Counter RHSExecCnt = getRegionCounter(E);
+
+ // Extract the RHS's "False" Instance Counter.
+ Counter RHSFalseCnt = getRegionCounter(E->getRHS());
+
+ // Extract the Parent Region Counter.
+ Counter ParentCnt = getRegion().getCounter();
+
+ // Create Branch Region around LHS condition.
+ createBranchRegion(E->getLHS(), subtractCounters(ParentCnt, RHSExecCnt),
+ RHSExecCnt);
+
+ // Create Branch Region around RHS condition.
+ createBranchRegion(E->getRHS(), subtractCounters(RHSExecCnt, RHSFalseCnt),
+ RHSFalseCnt);
}
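The mirrored arithmetic for ||, again with assumed counts: over 10 evaluations (ParentCnt), an RHS reached 4 times (RHSExecCnt) and false once (RHSFalseCnt) gives an LHS branch region of (10 - 4 = 6, 4), since a true LHS skips the RHS, and an RHS branch region of (4 - 1 = 3, 1).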
void VisitLambdaExpr(const LambdaExpr *LE) {
@@ -1291,13 +1544,6 @@ struct CounterCoverageMappingBuilder
}
};
-std::string normalizeFilename(StringRef Filename) {
- llvm::SmallString<256> Path(Filename);
- llvm::sys::fs::make_absolute(Path);
- llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- return std::string(Path);
-}
-
} // end anonymous namespace
static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
@@ -1319,17 +1565,43 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
case CounterMappingRegion::GapRegion:
OS << "Gap,";
break;
+ case CounterMappingRegion::BranchRegion:
+ OS << "Branch,";
+ break;
}
OS << "File " << R.FileID << ", " << R.LineStart << ":" << R.ColumnStart
<< " -> " << R.LineEnd << ":" << R.ColumnEnd << " = ";
Ctx.dump(R.Count, OS);
+
+ if (R.Kind == CounterMappingRegion::BranchRegion) {
+ OS << ", ";
+ Ctx.dump(R.FalseCount, OS);
+ }
+
if (R.Kind == CounterMappingRegion::ExpansionRegion)
OS << " (Expanded file = " << R.ExpandedFileID << ")";
OS << "\n";
}
}
+CoverageMappingModuleGen::CoverageMappingModuleGen(
+ CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
+ : CGM(CGM), SourceInfo(SourceInfo) {
+ ProfilePrefixMap = CGM.getCodeGenOpts().ProfilePrefixMap;
+}
+
+std::string CoverageMappingModuleGen::normalizeFilename(StringRef Filename) {
+ llvm::SmallString<256> Path(Filename);
+ llvm::sys::fs::make_absolute(Path);
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
+ for (const auto &Entry : ProfilePrefixMap) {
+ if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
+ break;
+ }
+ return Path.str().str();
+}
+
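A hedged usage sketch (paths invented): with a prefix-map entry /build/src=/src in ProfilePrefixMap, a coverage filename /build/src/lib/foo.c is first absolutized and dot-cleaned, then rewritten to /src/lib/foo.c; the loop breaks after the first matching entry, so at most one rewrite applies.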
static std::string getInstrProfSection(const CodeGenModule &CGM,
llvm::InstrProfSectKind SK) {
return llvm::getInstrProfSectionName(
diff --git a/clang/lib/CodeGen/CoverageMappingGen.h b/clang/lib/CodeGen/CoverageMappingGen.h
index 5d79d1e65670..b26f79be5316 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/clang/lib/CodeGen/CoverageMappingGen.h
@@ -16,6 +16,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/raw_ostream.h"
@@ -29,15 +30,47 @@ class Preprocessor;
class Decl;
class Stmt;
+struct SkippedRange {
+ SourceRange Range;
+ // The location of token before the skipped source range.
+ SourceLocation PrevTokLoc;
+ // The location of token after the skipped source range.
+ SourceLocation NextTokLoc;
+
+ SkippedRange(SourceRange Range, SourceLocation PrevTokLoc = SourceLocation(),
+ SourceLocation NextTokLoc = SourceLocation())
+ : Range(Range), PrevTokLoc(PrevTokLoc), NextTokLoc(NextTokLoc) {}
+};
+
/// Stores additional source code information like skipped ranges which
/// is required by the coverage mapping generator and is obtained from
/// the preprocessor.
-class CoverageSourceInfo : public PPCallbacks {
- std::vector<SourceRange> SkippedRanges;
+class CoverageSourceInfo : public PPCallbacks,
+ public CommentHandler,
+ public EmptylineHandler {
+ // A vector of skipped source ranges, each paired with the locations of the
+ // tokens immediately before and after it.
+ std::vector<SkippedRange> SkippedRanges;
+
+ SourceManager &SourceMgr;
+
public:
- ArrayRef<SourceRange> getSkippedRanges() const { return SkippedRanges; }
+ // Location of the token parsed before HandleComment is called. This is
+ // updated every time Preprocessor::Lex lexes a new token.
+ SourceLocation PrevTokLoc;
+
+ CoverageSourceInfo(SourceManager &SourceMgr) : SourceMgr(SourceMgr) {}
+
+ std::vector<SkippedRange> &getSkippedRanges() { return SkippedRanges; }
+
+ void AddSkippedRange(SourceRange Range);
void SourceRangeSkipped(SourceRange Range, SourceLocation EndifLoc) override;
+
+ void HandleEmptyline(SourceRange Range) override;
+
+ bool HandleComment(Preprocessor &PP, SourceRange Range) override;
+
+ void updateNextTokLoc(SourceLocation Loc);
};
namespace CodeGen {
@@ -60,14 +93,18 @@ class CoverageMappingModuleGen {
llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
std::vector<llvm::Constant *> FunctionNames;
std::vector<FunctionInfo> FunctionRecords;
+ std::map<std::string, std::string> ProfilePrefixMap;
+
+ std::string normalizeFilename(StringRef Filename);
/// Emit a function record.
void emitFunctionMappingRecord(const FunctionInfo &Info,
uint64_t FilenamesRef);
public:
- CoverageMappingModuleGen(CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
- : CGM(CGM), SourceInfo(SourceInfo) {}
+ static CoverageSourceInfo *setUpCoverageCallbacks(Preprocessor &PP);
+
+ CoverageMappingModuleGen(CodeGenModule &CGM, CoverageSourceInfo &SourceInfo);
CoverageSourceInfo &getSourceInfo() const {
return SourceInfo;
@@ -87,6 +124,9 @@ public:
/// Return the coverage mapping translation unit file id
/// for the given file.
unsigned getFileID(const FileEntry *File);
+
+ /// Return an interface into CodeGenModule.
+ CodeGenModule &getCodeGenModule() { return CGM; }
};
/// Organizes the per-function state that is used while generating
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 80de2a6e3950..50fb30a95cbb 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -9,11 +9,11 @@
// This provides C++ code generation targeting the Itanium C++ ABI. The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
-// http://www.codesourcery.com/public/cxx-abi/abi.html
-// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
+// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
-// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+// https://developer.arm.com/documentation/ihi0041/g/
//
//===----------------------------------------------------------------------===//
@@ -361,10 +361,11 @@ public:
return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
// Otherwise, we need a thread wrapper unless we know that every
- // translation unit will emit the value as a constant. We rely on
- // ICE-ness not varying between translation units, which isn't actually
+ // translation unit will emit the value as a constant. We rely on the
+ // variable being constant-initialized in every translation unit if it's
+ // constant-initialized in any translation unit, which isn't actually
// guaranteed by the standard but is necessary for sanity.
- return InitDecl->isInitKnownICE() && InitDecl->isInitICE();
+ return InitDecl->hasConstantInitialization();
}
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
@@ -485,9 +486,9 @@ public:
CharUnits cookieSize) override;
};
-class iOS64CXXABI : public ARMCXXABI {
+class AppleARM64CXXABI : public ARMCXXABI {
public:
- iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
+ AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
Use32BitVTableOffsetABI = true;
}
@@ -550,8 +551,8 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::WatchOS:
return new ARMCXXABI(CGM);
- case TargetCXXABI::iOS64:
- return new iOS64CXXABI(CGM);
+ case TargetCXXABI::AppleARM64:
+ return new AppleARM64CXXABI(CGM);
case TargetCXXABI::Fuchsia:
return new FuchsiaCXXABI(CGM);
@@ -770,7 +771,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
};
if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
- CGF.EmitTrapCheck(CheckResult);
+ CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
} else {
llvm::Value *AllVtables = llvm::MetadataAsValue::get(
CGM.getLLVMContext(),
@@ -1087,7 +1088,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
if (!MPD)
return EmitNullMemberPointer(MPT);
- CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
+ CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
return BuildMemberPointer(MD, ThisAdjustment);
@@ -2111,7 +2112,7 @@ CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
// The array cookie is a size_t; pad that up to the element alignment.
// The cookie is actually right-justified in that space.
return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
- CGM.getContext().getTypeAlignInChars(elementType));
+ CGM.getContext().getPreferredTypeAlignInChars(elementType));
}
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
@@ -2128,7 +2129,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// The size of the cookie.
CharUnits CookieSize =
- std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+ std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
assert(CookieSize == getArrayCookieSizeImpl(ElementType));
// Compute an offset to the cookie.
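An illustrative effect, assuming an AIX-style 32-bit target where double has a 4-byte ABI alignment but an 8-byte preferred alignment: for new double[n], the cookie grows from max(4, 4) = 4 bytes to max(4, 8) = 8 bytes, keeping the element storage at the preferred alignment.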
@@ -2330,7 +2331,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGM.getDataLayout().getABITypeAlignment(guardTy));
}
}
- llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
+ llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
+ CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
// Create the guard variable if we don't already have it (as we
// might if we're double-emitting this function body).
@@ -2529,48 +2531,132 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
CGF.EmitNounwindRuntimeCall(atexit, args);
}
-void CodeGenModule::registerGlobalDtorsWithAtExit() {
+static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
+ StringRef FnName) {
+ // Create a function that registers/unregisters destructors that have the same
+ // priority.
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
+ FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
+
+ return GlobalInitOrCleanupFn;
+}
+
+static FunctionDecl *
+createGlobalInitOrCleanupFnDecl(CodeGen::CodeGenModule &CGM, StringRef FnName) {
+ ASTContext &Ctx = CGM.getContext();
+ QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {});
+ return FunctionDecl::Create(
+ Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get(FnName), FunctionTy, nullptr, SC_Static, false, false);
+}
+
+void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
for (const auto &I : DtorsUsingAtExit) {
int Priority = I.first;
+ std::string GlobalCleanupFnName =
+ std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
+
+ llvm::Function *GlobalCleanupFn =
+ createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
+
+ FunctionDecl *GlobalCleanupFD =
+ createGlobalInitOrCleanupFnDecl(*this, GlobalCleanupFnName);
+
+ CodeGenFunction CGF(*this);
+ CGF.StartFunction(GlobalDecl(GlobalCleanupFD), getContext().VoidTy,
+ GlobalCleanupFn, getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation(), SourceLocation());
+
+ // Get the destructor function type, void(*)(void).
+ llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
+ llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
+
+ // Destructor functions are run/unregistered in non-ascending
+ // order of their priorities.
const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
+ auto itv = Dtors.rbegin();
+ while (itv != Dtors.rend()) {
+ llvm::Function *Dtor = *itv;
+
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the correct CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
+ llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
+ llvm::Value *NeedsDestruct =
+ CGF.Builder.CreateIsNull(V, "needs_destruct");
+
+ llvm::BasicBlock *DestructCallBlock =
+ CGF.createBasicBlock("destruct.call");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
+ (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
+ // Check if unatexit returns a value of 0. If it does, jump to
+ // DestructCallBlock, otherwise jump to EndBlock directly.
+ CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
+
+ CGF.EmitBlock(DestructCallBlock);
+
+ // Emit the call to casted Dtor.
+ llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
+ // Make sure the call and the callee agree on calling convention.
+ CI->setCallingConv(Dtor->getCallingConv());
+
+ CGF.EmitBlock(EndBlock);
+
+ itv++;
+ }
+
+ CGF.FinishFunction();
+ AddGlobalDtor(GlobalCleanupFn, Priority);
+ }
+}
+
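A C-level sketch of what each emitted __GLOBAL_cleanup_<priority> body does, for two destructors d1 and d2 registered at the same priority (names hypothetical; unatexit is the AIX routine this path targets):

  void __GLOBAL_cleanup_65535(void) {
    // Reverse registration order; run a destructor only when unatexit
    // returns 0 (it was still registered and is now removed), so nothing
    // runs twice if atexit processing already fired it.
    if (unatexit(d2) == 0)
      d2();
    if (unatexit(d1) == 0)
      d1();
  }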
+void CodeGenModule::registerGlobalDtorsWithAtExit() {
+ for (const auto &I : DtorsUsingAtExit) {
+ int Priority = I.first;
+ std::string GlobalInitFnName =
+ std::string("__GLOBAL_init_") + llvm::to_string(Priority);
+ llvm::Function *GlobalInitFn =
+ createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
+ FunctionDecl *GlobalInitFD =
+ createGlobalInitOrCleanupFnDecl(*this, GlobalInitFnName);
+
+ CodeGenFunction CGF(*this);
+ CGF.StartFunction(GlobalDecl(GlobalInitFD), getContext().VoidTy,
+ GlobalInitFn, getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation(), SourceLocation());
- // Create a function that registers destructors that have the same priority.
- //
// Since constructor functions are run in non-descending order of their
// priorities, destructors are registered in non-descending order of their
// priorities, and since destructor functions are run in the reverse order
// of their registration, destructor functions are run in non-ascending
// order of their priorities.
- CodeGenFunction CGF(*this);
- std::string GlobalInitFnName =
- std::string("__GLOBAL_init_") + llvm::to_string(Priority);
- llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- llvm::Function *GlobalInitFn = CreateGlobalInitOrCleanUpFunction(
- FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
- SourceLocation());
- ASTContext &Ctx = getContext();
- QualType ReturnTy = Ctx.VoidTy;
- QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
- FunctionDecl *FD = FunctionDecl::Create(
- Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
- &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
- false, false);
- CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
- getTypes().arrangeNullaryFunction(), FunctionArgList(),
- SourceLocation(), SourceLocation());
-
+ const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
for (auto *Dtor : Dtors) {
// Register the destructor function calling __cxa_atexit if it is
// available. Otherwise fall back on calling atexit.
- if (getCodeGenOpts().CXAAtExit)
+ if (getCodeGenOpts().CXAAtExit) {
emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
- else
- CGF.registerGlobalDtorWithAtExit(Dtor);
+ } else {
+ // Get the destructor function type, void(*)(void).
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
+
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the correct CC. Go ahead and cast it to the
+ // right prototype.
+ CGF.registerGlobalDtorWithAtExit(
+ llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
+ }
}
CGF.FinishFunction();
AddGlobalCtor(GlobalInitFn, Priority, nullptr);
}
+
+ if (getCXXABI().useSinitAndSterm())
+ unregisterGlobalDtorsWithUnAtExit();
}
/// Register a global destructor as best as we know how.
@@ -3092,6 +3178,9 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -4567,7 +4656,8 @@ void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
- FunctionArgList());
+ FunctionArgList(), D.getLocation(),
+ D.getInit()->getExprLoc());
// The unatexit subroutine unregisters __dtor functions that were previously
// registered by the atexit subroutine. If the referenced function is found,
@@ -4596,5 +4686,16 @@ void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
CGF.FinishFunction();
- CGM.AddCXXStermFinalizerEntry(StermFinalizer);
+ assert(!D.getAttr<InitPriorityAttr>() &&
+ "Prioritized sinit and sterm functions are not yet supported.");
+
+ if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
+ getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR)
+ // According to C++ [basic.start.init]p2, class template static data
+ // members (i.e., implicitly or explicitly instantiated specializations)
+ // have unordered initialization. As a consequence, we can put them into
+ // their own llvm.global_dtors entry.
+ CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
+ else
+ CGM.AddCXXStermFinalizerEntry(StermFinalizer);
}
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 45c6cb6b2e0d..cb0dc1d5d717 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -771,6 +771,9 @@ public:
LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) override;
+ virtual bool
+ isPermittedToBeHomogeneousAggregate(const CXXRecordDecl *RD) const override;
+
private:
typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy;
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy;
@@ -816,34 +819,40 @@ private:
CGCXXABI::RecordArgABI
MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
+ // Use the default C calling convention rules for things that can be passed in
+ // registers, i.e. trivially copyable records or records marked with
+ // [[trivial_abi]].
+ if (RD->canPassInRegisters())
+ return RAA_Default;
+
switch (CGM.getTarget().getTriple().getArch()) {
default:
// FIXME: Implement for other architectures.
- return RAA_Default;
+ return RAA_Indirect;
case llvm::Triple::thumb:
- // Use the simple Itanium rules for now.
+ // Pass things indirectly for now because it is simple.
// FIXME: This is incompatible with MSVC for arguments with a dtor and no
// copy ctor.
- return !RD->canPassInRegisters() ? RAA_Indirect : RAA_Default;
+ return RAA_Indirect;
- case llvm::Triple::x86:
- // All record arguments are passed in memory on x86. Decide whether to
- // construct the object directly in argument memory, or to construct the
- // argument elsewhere and copy the bytes during the call.
+ case llvm::Triple::x86: {
+ // If the argument has *required* alignment greater than four bytes, pass
+ // it indirectly. Prior to MSVC version 19.14, passing overaligned
+ // arguments was not supported and resulted in a compiler error. In 19.14
+ // and later versions, such arguments are now passed indirectly.
+ TypeInfo Info = getContext().getTypeInfo(RD->getTypeForDecl());
+ if (Info.AlignIsRequired && Info.Align > 4)
+ return RAA_Indirect;
// If C++ prohibits us from making a copy, construct the arguments directly
// into argument memory.
- if (!RD->canPassInRegisters())
- return RAA_DirectInMemory;
-
- // Otherwise, construct the argument into a temporary and copy the bytes
- // into the outgoing argument memory.
- return RAA_Default;
+ return RAA_DirectInMemory;
+ }
case llvm::Triple::x86_64:
case llvm::Triple::aarch64:
- return !RD->canPassInRegisters() ? RAA_Indirect : RAA_Default;
+ return RAA_Indirect;
}
llvm_unreachable("invalid enum");
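A hypothetical case for the new x86 rule:

  struct alignas(8) Over { int a, b; };  // required alignment 8 > 4 bytes
  void f(Over o);                        // now passed indirectly, matching MSVC 19.14+
  struct Plain { int a, b; };            // no *required* over-alignment
  void g(Plain p);                       // stays RAA_DirectInMemory

The alignas specifier is what makes Info.AlignIsRequired true here.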
@@ -1064,11 +1073,7 @@ bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
return isDeletingDtor(GD);
}
-static bool IsSizeGreaterThan128(const CXXRecordDecl *RD) {
- return RD->getASTContext().getTypeSize(RD->getTypeForDecl()) > 128;
-}
-
-static bool hasMicrosoftABIRestrictions(const CXXRecordDecl *RD) {
+static bool isTrivialForAArch64MSVC(const CXXRecordDecl *RD) {
// For AArch64, we use the C++14 definition of an aggregate, so we also
// check for:
// No private or protected non static data members.
@@ -1077,19 +1082,19 @@ static bool hasMicrosoftABIRestrictions(const CXXRecordDecl *RD) {
// Additionally, we need to ensure that there is a trivial copy assignment
// operator, a trivial destructor and no user-provided constructors.
if (RD->hasProtectedFields() || RD->hasPrivateFields())
- return true;
+ return false;
if (RD->getNumBases() > 0)
- return true;
+ return false;
if (RD->isPolymorphic())
- return true;
+ return false;
if (RD->hasNonTrivialCopyAssignment())
- return true;
+ return false;
for (const CXXConstructorDecl *Ctor : RD->ctors())
if (Ctor->isUserProvided())
- return true;
+ return false;
if (RD->hasNonTrivialDestructor())
- return true;
- return false;
+ return false;
+ return true;
}
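For instance, under these checks struct P { int i; double d; }; is trivial for the AArch64 MSVC ABI, while adding a base class, a virtual function, a private or protected field, a user-provided constructor, or a non-trivial destructor or copy assignment makes isTrivialForAArch64MSVC return false.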
bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
@@ -1097,21 +1102,29 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
if (!RD)
return false;
+ // Normally, the C++ concept of "is trivially copyable" is used to determine
+ // if a struct can be returned directly. However, as MSVC and the language
+ // have evolved, the definition of "trivially copyable" has changed, while the
+ // ABI must remain stable. AArch64 uses the C++14 concept of an "aggregate",
+ // while other ISAs use the older concept of "plain old data".
+ bool isTrivialForABI = RD->isPOD();
bool isAArch64 = CGM.getTarget().getTriple().isAArch64();
- bool isSimple = !isAArch64 || !hasMicrosoftABIRestrictions(RD);
- bool isIndirectReturn =
- isAArch64 ? (!RD->canPassInRegisters() ||
- IsSizeGreaterThan128(RD))
- : !RD->isPOD();
- bool isInstanceMethod = FI.isInstanceMethod();
-
- if (isIndirectReturn || !isSimple || isInstanceMethod) {
+ if (isAArch64)
+ isTrivialForABI = RD->canPassInRegisters() && isTrivialForAArch64MSVC(RD);
+
+ // MSVC always returns structs indirectly from C++ instance methods.
+ bool isIndirectReturn = !isTrivialForABI || FI.isInstanceMethod();
+
+ if (isIndirectReturn) {
CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- FI.getReturnInfo().setSRetAfterThis(isInstanceMethod);
- FI.getReturnInfo().setInReg(isAArch64 &&
- !(isSimple && IsSizeGreaterThan128(RD)));
+ // MSVC always passes `this` before the `sret` parameter.
+ FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod());
+
+ // On AArch64, use the `inreg` attribute if the object is not considered
+ // trivially copyable, or if this is an instance-method struct return.
+ FI.getReturnInfo().setInReg(isAArch64);
return true;
}
@@ -1234,12 +1247,14 @@ void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
// the typical calling convention and have a single 'this' pointer for an
// argument -or- they get a wrapper function which appropriately thunks to the
// real default constructor. This thunk is the default constructor closure.
- if (D->hasAttr<DLLExportAttr>() && D->isDefaultConstructor())
+ if (D->hasAttr<DLLExportAttr>() && D->isDefaultConstructor() &&
+ D->isDefined()) {
if (!hasDefaultCXXMethodCC(getContext(), D) || D->getNumParams() != 0) {
llvm::Function *Fn = getAddrOfCXXCtorClosure(D, Ctor_DefaultClosure);
Fn->setLinkage(llvm::GlobalValue::WeakODRLinkage);
CGM.setGVProperties(Fn, D);
}
+ }
}
void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
@@ -1637,8 +1652,9 @@ void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
// TODO: Should VirtualFunctionElimination also be supported here?
// See similar handling in CodeGenModule::EmitVTableTypeMetadata.
if (CGM.getCodeGenOpts().WholeProgramVTables) {
+ llvm::DenseSet<const CXXRecordDecl *> Visited;
llvm::GlobalObject::VCallVisibility TypeVis =
- CGM.GetVCallVisibilityLevel(RD);
+ CGM.GetVCallVisibilityLevel(RD, Visited);
if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
VTable->setVCallVisibilityMetadata(TypeVis);
}
@@ -4345,3 +4361,12 @@ MicrosoftCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
performBaseAdjustment(CGF, This, QualType(RD->getTypeForDecl(), 0));
return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
+
+bool MicrosoftCXXABI::isPermittedToBeHomogeneousAggregate(
+ const CXXRecordDecl *CXXRD) const {
+ // MSVC on Arm64 Windows considers a type not to be an HFA if it is not an
+ // aggregate according to the C++14 spec. This is not consistent with the
+ // AAPCS64, but it is the de facto spec on that platform.
+ return !CGM.getTarget().getTriple().isAArch64() ||
+ isTrivialForAArch64MSVC(CXXRD);
+}
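A sketch of the consequence (type hypothetical): struct HFA { double a, b, c; }; remains a homogeneous aggregate on every target, but on Arm64 MSVC giving it a user-provided constructor fails isTrivialForAArch64MSVC, so it is no longer permitted to be an HFA and falls back to the ordinary argument-passing rules.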
diff --git a/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 0c7e5f4598f8..de5c1a4c8f02 100644
--- a/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -49,7 +49,7 @@ class PCHContainerGenerator : public ASTConsumer {
const PreprocessorOptions &PreprocessorOpts;
CodeGenOptions CodeGenOpts;
const TargetOptions TargetOpts;
- const LangOptions LangOpts;
+ LangOptions LangOpts;
std::unique_ptr<llvm::LLVMContext> VMContext;
std::unique_ptr<llvm::Module> M;
std::unique_ptr<CodeGen::CodeGenModule> Builder;
@@ -147,7 +147,7 @@ public:
// The debug info output isn't affected by CodeModel and
// ThreadModel, but the backend expects them to be nonempty.
CodeGenOpts.CodeModel = "default";
- CodeGenOpts.ThreadModel = "single";
+ LangOpts.setThreadModel(LangOptions::ThreadModelKind::Single);
CodeGenOpts.DebugTypeExtRefs = true;
// When building a module MainFileName is the name of the modulemap file.
CodeGenOpts.MainFileName =
@@ -250,10 +250,10 @@ public:
// PCH files don't have a signature field in the control block,
// but LLVM detects DWO CUs by looking for a non-zero DWO id.
// We use the lower 64 bits for debug info.
+
uint64_t Signature =
- Buffer->Signature
- ? (uint64_t)Buffer->Signature[1] << 32 | Buffer->Signature[0]
- : ~1ULL;
+ Buffer->Signature ? Buffer->Signature.truncatedValue() : ~1ULL;
+
Builder->getModuleDebugInfo()->setDwoId(Signature);
// Finalize the Builder.
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
index 3d7421ac2e16..1d712f4fde3c 100644
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -93,11 +93,24 @@ void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
// Just add it all as opaque.
addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));
- // Everything else is scalar and should not convert as an LLVM aggregate.
+ // Atomic types.
+ } else if (const auto *atomicType = type->getAs<AtomicType>()) {
+ auto valueType = atomicType->getValueType();
+ auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
+ auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);
+
+ addTypedData(atomicType->getValueType(), begin);
+
+ // Add atomic padding.
+ auto atomicPadding = atomicSize - valueSize;
+ if (atomicPadding > CharUnits::Zero())
+ addOpaqueData(begin + valueSize, begin + atomicSize);
+
+ // Everything else is scalar and should not convert as an LLVM aggregate.
} else {
// We intentionally convert as !ForMem because we want to preserve
// that a type was an i1.
- auto llvmType = CGM.getTypes().ConvertType(type);
+ auto *llvmType = CGM.getTypes().ConvertType(type);
addTypedData(llvmType, begin);
}
}
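Illustrative numbers for the atomic path, assuming a target that rounds this atomic up to four bytes: for _Atomic(struct { char c[3]; }), valueSize is 3 and atomicSize is 4, so the three value bytes are added as typed data and the trailing byte at begin + 3 is added as opaque padding.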
@@ -320,9 +333,12 @@ restartAfterSplit:
// If we have a vector type, split it.
if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
auto eltTy = vecTy->getElementType();
- CharUnits eltSize = (end - begin) / vecTy->getNumElements();
+ CharUnits eltSize =
+ (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
assert(eltSize == getTypeStoreSize(CGM, eltTy));
- for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
+ for (unsigned i = 0,
+ e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
+ i != e; ++i) {
addEntry(eltTy, begin, begin + eltSize);
begin += eltSize;
}
@@ -674,8 +690,9 @@ bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
llvm::VectorType *vectorTy) {
- return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
- vectorTy->getNumElements());
+ return isLegalVectorType(
+ CGM, vectorSize, vectorTy->getElementType(),
+ cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
@@ -688,7 +705,7 @@ bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
llvm::VectorType *vectorTy) {
- auto numElts = vectorTy->getNumElements();
+ auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
auto eltTy = vectorTy->getElementType();
// Try to split the vector type in half.
@@ -710,7 +727,7 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
}
// Try to split the vector into legal subvectors.
- auto numElts = origVectorTy->getNumElements();
+ auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
auto eltTy = origVectorTy->getElementType();
assert(numElts != 1);
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 9cd63ebe29ee..bcd24292ff41 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -80,17 +80,17 @@ static bool isAggregateTypeForABI(QualType T) {
T->isMemberFunctionPointerType();
}
-ABIArgInfo
-ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
- llvm::Type *Padding) const {
- return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
- ByRef, Realign, Padding);
+ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
+ bool Realign,
+ llvm::Type *Padding) const {
+ return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
+ Realign, Padding);
}
ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
- /*ByRef*/ false, Realign);
+ /*ByVal*/ false, Realign);
}
Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -257,6 +257,11 @@ LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
<< " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
+ case IndirectAliased:
+ OS << "Indirect Align=" << getIndirectAlign().getQuantity()
+ << " AadrSpace=" << getIndirectAddrSpace()
+ << " Realign=" << getIndirectRealign();
+ break;
case Expand:
OS << "Expand";
break;
@@ -354,7 +359,7 @@ static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
/// leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType ValueTy, bool IsIndirect,
- std::pair<CharUnits, CharUnits> ValueInfo,
+ TypeInfoChars ValueInfo,
CharUnits SlotSizeAndAlign,
bool AllowHigherAlign) {
// The size and alignment of the value that was passed directly.
@@ -363,8 +368,8 @@ static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
DirectSize = CGF.getPointerSize();
DirectAlign = CGF.getPointerAlign();
} else {
- DirectSize = ValueInfo.first;
- DirectAlign = ValueInfo.second;
+ DirectSize = ValueInfo.Width;
+ DirectAlign = ValueInfo.Align;
}
// Cast the address we've calculated to the right type.
@@ -378,7 +383,7 @@ static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
AllowHigherAlign);
if (IsIndirect) {
- Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
}
return Addr;
@@ -651,7 +656,7 @@ Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
"Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
- CharUnits TyAlignForABI = TyInfo.second;
+ CharUnits TyAlignForABI = TyInfo.Align;
llvm::Type *BaseTy =
llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
@@ -1084,11 +1089,6 @@ struct CCState {
unsigned FreeSSERegs = 0;
};
-enum {
- // Vectorcall only allows the first 6 parameters to be passed in registers.
- VectorcallMaxParamNumAsReg = 6
-};
-
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
enum Class {
@@ -1989,6 +1989,7 @@ static bool isArgInAlloca(const ABIArgInfo &Info) {
case ABIArgInfo::InAlloca:
return true;
case ABIArgInfo::Ignore:
+ case ABIArgInfo::IndirectAliased:
return false;
case ABIArgInfo::Indirect:
case ABIArgInfo::Direct:
@@ -2056,8 +2057,8 @@ Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
//
// Just messing with TypeInfo like this works because we never pass
// anything indirectly.
- TypeInfo.second = CharUnits::fromQuantity(
- getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
+ TypeInfo.Align = CharUnits::fromQuantity(
+ getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
TypeInfo, CharUnits::fromQuantity(4),
@@ -2091,6 +2092,23 @@ bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
}
}
+static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) {
+ if (!FD->hasAttr<AnyX86InterruptAttr>())
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->setCallingConv(llvm::CallingConv::X86_INTR);
+ if (FD->getNumParams() == 0)
+ return;
+
+ auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
+ llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
+ llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
+ Fn->getContext(), ByValTy);
+ Fn->addParamAttr(0, NewAttr);
+}
+
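For reference, the source pattern this helper lowers (handler name and frame type are illustrative; the attribute is the documented x86 interrupt extension):

  struct interrupt_frame;
  __attribute__((interrupt))
  void isr(struct interrupt_frame *frame) {
    // handler body
  }

The function receives the X86_INTR calling convention, and when a parameter is present, parameter 0 is annotated byval with its pointee type so the backend knows the frame's size.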
void X86_32TargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
if (GV->isDeclaration())
@@ -2100,10 +2118,8 @@ void X86_32TargetCodeGenInfo::setTargetAttributes(
llvm::Function *Fn = cast<llvm::Function>(GV);
Fn->addFnAttr("stackrealign");
}
- if (FD->hasAttr<AnyX86InterruptAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->setCallingConv(llvm::CallingConv::X86_INTR);
- }
+
+ addX86InterruptAttrs(FD, GV, CGM);
}
}
@@ -2384,10 +2400,8 @@ public:
private:
ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
bool IsVectorCall, bool IsRegCall) const;
- ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
- const ABIArgInfo &current) const;
- void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
- bool IsVectorCall, bool IsRegCall) const;
+ ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
+ const ABIArgInfo &current) const;
X86AVXABILevel AVXLevel;
@@ -2404,10 +2418,8 @@ public:
}
/// Disable tail call on x86-64. The epilogue code before the tail jump blocks
- /// the autoreleaseRV/retainRV optimization.
- bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {
- return true;
- }
+ /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
+ bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 7;
@@ -2472,10 +2484,8 @@ public:
llvm::Function *Fn = cast<llvm::Function>(GV);
Fn->addFnAttr("stackrealign");
}
- if (FD->hasAttr<AnyX86InterruptAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->setCallingConv(llvm::CallingConv::X86_INTR);
- }
+
+ addX86InterruptAttrs(FD, GV, CGM);
}
}
@@ -2586,7 +2596,7 @@ static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
// If the argument does not end in .lib, automatically add the suffix.
// If the argument contains a space, enclose it in quotes.
// This matches the behavior of MSVC.
- bool Quote = (Lib.find(" ") != StringRef::npos);
+ bool Quote = (Lib.find(' ') != StringRef::npos);
std::string ArgStr = Quote ? "\"" : "";
ArgStr += Lib;
if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
@@ -2685,10 +2695,8 @@ void WinX86_64TargetCodeGenInfo::setTargetAttributes(
llvm::Function *Fn = cast<llvm::Function>(GV);
Fn->addFnAttr("stackrealign");
}
- if (FD->hasAttr<AnyX86InterruptAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->setCallingConv(llvm::CallingConv::X86_INTR);
- }
+
+ addX86InterruptAttrs(FD, GV, CGM);
}
addStackProbeTargetAttributes(D, GV, CGM);
@@ -3057,6 +3065,11 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// Classify the fields one at a time, merging the results.
unsigned idx = 0;
+ bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver11 ||
+ getContext().getTargetInfo().getTriple().isPS4();
+ bool IsUnion = RT->isUnionType() && !UseClang11Compat;
+
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
@@ -3067,14 +3080,17 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
continue;
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
- // four eightbytes, or it contains unaligned fields, it has class MEMORY.
+ // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
//
- // The only case a 256-bit wide vector could be used is when the struct
- // contains a single 256-bit element. Since Lo and Hi logic isn't extended
- // to work for sizes wider than 128, early check and fallback to memory.
+ // The only case a 256-bit or a 512-bit wide vector could be used is when
+ // the struct contains a single 256-bit or 512-bit element. Early check
+ // and fallback to memory.
//
- if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
- Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
+ // FIXME: Extend the Lo and Hi logic properly to work for sizes wider
+ // than 128.
+ if (Size > 128 &&
+ ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
+ Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
Lo = Memory;
postMerge(Size, Lo, Hi);
return;
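A hedged example of what the IsUnion carve-out above changes (exact classification also depends on the AVX level; -fclang-abi-compat=11 restores the old behavior):

    // Sketch only: previously the double member (64 bits != 256) forced class
    // MEMORY for the whole union; with the union special case, the fields are
    // classified individually and the union can be passed in a YMM register.
    #include <immintrin.h>
    union U {
      __m256 v; // 256-bit vector member
      double d; // narrower member no longer forces MEMORY by itself
    };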
@@ -4059,10 +4075,9 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
// Copy to a temporary if necessary to ensure the appropriate alignment.
- std::pair<CharUnits, CharUnits> SizeAlign =
- getContext().getTypeInfoInChars(Ty);
- uint64_t TySize = SizeAlign.first.getQuantity();
- CharUnits TyAlign = SizeAlign.second;
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+ uint64_t TySize = TInfo.Width.getQuantity();
+ CharUnits TyAlign = TInfo.Align;
// Copy into a temporary if the type is more aligned than the
// register save area.
@@ -4141,10 +4156,8 @@ Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
/*allowHigherAlign*/ false);
}
-ABIArgInfo
-WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
- const ABIArgInfo &current) const {
- // Assumes vectorCall calling convention.
+ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
+ QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
const Type *Base = nullptr;
uint64_t NumElts = 0;
@@ -4277,31 +4290,6 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getDirect();
}
-void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
- unsigned FreeSSERegs,
- bool IsVectorCall,
- bool IsRegCall) const {
- unsigned Count = 0;
- for (auto &I : FI.arguments()) {
- // Vectorcall in x64 only permits the first 6 arguments to be passed
- // as XMM/YMM registers.
- if (Count < VectorcallMaxParamNumAsReg)
- I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
- else {
- // Since these cannot be passed in registers, pretend no registers
- // are left.
- unsigned ZeroSSERegsAvail = 0;
- I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
- IsVectorCall, IsRegCall);
- }
- ++Count;
- }
-
- for (auto &I : FI.arguments()) {
- I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
- }
-}
-
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
const unsigned CC = FI.getCallingConvention();
bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
@@ -4336,13 +4324,25 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
FreeSSERegs = 16;
}
+ unsigned ArgNum = 0;
+ unsigned ZeroSSERegs = 0;
+ for (auto &I : FI.arguments()) {
+ // Vectorcall in x64 only permits the first 6 arguments to be passed as
+ // XMM/YMM registers. After the sixth argument, pretend no vector
+ // registers are left.
+ unsigned *MaybeFreeSSERegs =
+ (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
+ I.info =
+ classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
+ ++ArgNum;
+ }
+
if (IsVectorCall) {
- computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
- } else {
+ // For vectorcall, assign aggregate HVAs to any free vector registers in a
+ // second pass.
for (auto &I : FI.arguments())
- I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
+ I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
}
-
}
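To make the six-slot rule above concrete (a hedged sketch; __vectorcall requires MS-extensions mode, and HVAs are still reclassified by the second pass):

    // Sketch only: under x64 __vectorcall, only the first six parameters are
    // eligible for XMM/YMM registers; later vector parameters see zero free
    // SSE registers and go to memory.
    typedef float v4f __attribute__((vector_size(16)));
    void __vectorcall f(v4f a, v4f b, v4f c, v4f d, v4f e, v4f g,
                        v4f h); // h is the 7th parameter: passed in memory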
Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -4500,16 +4500,14 @@ bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isAnyComplexType())
- llvm::report_fatal_error("complex type is not supported on AIX yet");
+ return ABIArgInfo::getDirect();
if (RetTy->isVectorType())
- llvm::report_fatal_error("vector type is not supported on AIX yet");
+ return ABIArgInfo::getDirect();
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- // TODO: Evaluate if AIX power alignment rule would have an impact on the
- // alignment here.
if (isAggregateTypeForABI(RetTy))
return getNaturalAlignIndirect(RetTy);
@@ -4521,13 +4519,11 @@ ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
if (Ty->isAnyComplexType())
- llvm::report_fatal_error("complex type is not supported on AIX yet");
+ return ABIArgInfo::getDirect();
if (Ty->isVectorType())
- llvm::report_fatal_error("vector type is not supported on AIX yet");
+ return ABIArgInfo::getDirect();
- // TODO: Evaluate if AIX power alignment rule would have an impact on the
- // alignment here.
if (isAggregateTypeForABI(Ty)) {
// Records with non-trivial destructors/copy-constructors should not be
// passed by value.
@@ -4546,11 +4542,12 @@ ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
}
CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
- if (Ty->isAnyComplexType())
- llvm::report_fatal_error("complex type is not supported on AIX yet");
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
if (Ty->isVectorType())
- llvm::report_fatal_error("vector type is not supported on AIX yet");
+ return CharUnits::fromQuantity(16);
// If the structure contains a vector type, the alignment is 16.
if (isRecordWithSIMDVectorType(getContext(), Ty))
@@ -4565,10 +4562,11 @@ Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
llvm::report_fatal_error("complex type is not supported on AIX yet");
if (Ty->isVectorType())
- llvm::report_fatal_error("vector type is not supported on AIX yet");
+ llvm::report_fatal_error(
+ "vector types are not yet supported for variadic functions on AIX");
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.second = getParamTypeAlignment(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
@@ -4687,7 +4685,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
QualType Ty) const {
if (getTarget().getTriple().isOSDarwin()) {
auto TI = getContext().getTypeInfoInChars(Ty);
- TI.second = getParamTypeAlignment(Ty);
+ TI.Align = getParamTypeAlignment(Ty);
CharUnits SlotSize = CharUnits::fromQuantity(4);
return emitVoidPtrVAArg(CGF, VAList, Ty,
@@ -4711,13 +4709,12 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// };
bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
- bool isInt =
- Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
+ bool isInt = !Ty->isFloatingType();
bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
// All aggregates are passed indirectly? That doesn't seem consistent
// with the argument-lowering code.
- bool isIndirect = Ty->isAggregateType();
+ bool isIndirect = isAggregateTypeForABI(Ty);
CGBuilderTy &Builder = CGF.Builder;
@@ -4797,7 +4794,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
CharUnits Size;
if (!isIndirect) {
auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
- Size = TypeInfo.first.alignTo(OverflowAreaAlign);
+ Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
} else {
Size = CGF.getPointerSize();
}
@@ -4838,7 +4835,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
const llvm::Triple &Triple, const CodeGenOptions &Opts) {
- assert(Triple.getArch() == llvm::Triple::ppc);
+ assert(Triple.isPPC32());
switch (Opts.getStructReturnConvention()) {
case CodeGenOptions::SRCK_Default:
@@ -4876,42 +4873,12 @@ public:
private:
static const unsigned GPRBits = 64;
ABIKind Kind;
- bool HasQPX;
bool IsSoftFloatABI;
- // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
- // will be passed in a QPX register.
- bool IsQPXVectorTy(const Type *Ty) const {
- if (!HasQPX)
- return false;
-
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- unsigned NumElements = VT->getNumElements();
- if (NumElements == 1)
- return false;
-
- if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
- if (getContext().getTypeSize(Ty) <= 256)
- return true;
- } else if (VT->getElementType()->
- isSpecificBuiltinType(BuiltinType::Float)) {
- if (getContext().getTypeSize(Ty) <= 128)
- return true;
- }
- }
-
- return false;
- }
-
- bool IsQPXVectorTy(QualType Ty) const {
- return IsQPXVectorTy(Ty.getTypePtr());
- }
-
public:
- PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind,
bool SoftFloatABI)
- : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
- IsSoftFloatABI(SoftFloatABI) {}
+ : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
bool isPromotableTypeForABI(QualType Ty) const;
CharUnits getParamTypeAlignment(QualType Ty) const;
@@ -4939,8 +4906,7 @@ public:
const Type *T = isSingleElementStruct(I.type, getContext());
if (T) {
const BuiltinType *BT = T->getAs<BuiltinType>();
- if (IsQPXVectorTy(T) ||
- (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
+ if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
(BT && BT->isFloatingPoint())) {
QualType QT(T, 0);
I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
@@ -4968,10 +4934,10 @@ class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
- PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
+ PPC64_SVR4_ABIInfo::ABIKind Kind,
bool SoftFloatABI)
- : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>(
- CGT, Kind, HasQPX, SoftFloatABI)) {}
+ : TargetCodeGenInfo(
+ std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -5036,13 +5002,15 @@ CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
// Only vector types of size 16 bytes need alignment (larger types are
// passed via reference, smaller types are not aligned).
- if (IsQPXVectorTy(Ty)) {
- if (getContext().getTypeSize(Ty) > 128)
- return CharUnits::fromQuantity(32);
-
- return CharUnits::fromQuantity(16);
- } else if (Ty->isVectorType()) {
+ if (Ty->isVectorType()) {
return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
+ } else if (Ty->isRealFloatingType() &&
+ &getContext().getFloatTypeSemantics(Ty) ==
+ &llvm::APFloat::IEEEquad()) {
+ // According to ABI document section 'Optional Save Areas': If extended
+ // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
+ // format are supported, map them to a single quadword, quadword aligned.
+ return CharUnits::fromQuantity(16);
}
// For single-element float/vector structs, we consider the whole type
@@ -5051,8 +5019,7 @@ CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
const Type *EltType = isSingleElementStruct(Ty, getContext());
if (EltType) {
const BuiltinType *BT = EltType->getAs<BuiltinType>();
- if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
- getContext().getTypeSize(EltType) == 128) ||
+ if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
(BT && BT->isFloatingPoint()))
AlignAsType = EltType;
}
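A hedged illustration of the new IEEE-quad rule above (type spelling varies; on PPC64 this applies when long double or __float128 uses the IEEE binary128 format):

    // Sketch only: an IEEE binary128 argument is mapped to a single quadword
    // and gets 16-byte alignment in the parameter save area (previously 8).
    void g(int pad, __float128 x);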
@@ -5065,20 +5032,13 @@ CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
AlignAsType = Base;
// With special case aggregates, only vector base types need alignment.
- if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
- if (getContext().getTypeSize(AlignAsType) > 128)
- return CharUnits::fromQuantity(32);
-
- return CharUnits::fromQuantity(16);
- } else if (AlignAsType) {
+ if (AlignAsType) {
return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
}
// Otherwise, we only need alignment for any aggregate type that
// has an alignment requirement of >= 16 bytes.
if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
- if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
- return CharUnits::fromQuantity(32);
return CharUnits::fromQuantity(16);
}
@@ -5104,8 +5064,12 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
Members = 0;
- // If this is a C++ record, check the bases first.
+ // If this is a C++ record, check properties of the record such as its
+ // bases and any ABI-specific restrictions.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
+ return false;
+
for (const auto &I : CXXRD->bases()) {
// Ignore empty records.
if (isEmptyRecord(getContext(), I.getType(), true))
@@ -5202,7 +5166,7 @@ bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
}
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
- if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
+ if (getContext().getTypeSize(VT) == 128)
return true;
}
return false;
@@ -5231,7 +5195,7 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
// Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
// or via reference (larger than 16 bytes).
- if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
+ if (Ty->isVectorType()) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 128)
return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
@@ -5307,7 +5271,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
// Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
// or via reference (larger than 16 bytes).
- if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
+ if (RetTy->isVectorType()) {
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size > 128)
return getNaturalAlignIndirect(RetTy);
@@ -5360,7 +5324,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.second = getParamTypeAlignment(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
CharUnits SlotSize = CharUnits::fromQuantity(8);
@@ -5371,7 +5335,7 @@ Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// loads of the real and imaginary parts relative to the va_list pointer,
// and store them to a temporary structure.
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- CharUnits EltSize = TypeInfo.first / 2;
+ CharUnits EltSize = TypeInfo.Width / 2;
if (EltSize < SlotSize) {
Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
SlotSize * 2, SlotSize,
@@ -5448,6 +5412,7 @@ private:
ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo coerceIllegalVector(QualType Ty) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
@@ -5471,6 +5436,11 @@ private:
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override {
+ llvm::Type *BaseTy = CGF.ConvertType(Ty);
+ if (isa<llvm::ScalableVectorType>(BaseTy))
+ llvm::report_fatal_error("Passing SVE types to variadic functions is "
+ "currently not supported");
+
return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
: isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
@@ -5516,40 +5486,33 @@ public:
if (!FD)
return;
- LangOptions::SignReturnAddressScopeKind Scope =
- CGM.getLangOpts().getSignReturnAddressScope();
- LangOptions::SignReturnAddressKeyKind Key =
- CGM.getLangOpts().getSignReturnAddressKey();
- bool BranchTargetEnforcement = CGM.getLangOpts().BranchTargetEnforcement;
- if (const auto *TA = FD->getAttr<TargetAttr>()) {
- ParsedTargetAttr Attr = TA->parse();
- if (!Attr.BranchProtection.empty()) {
- TargetInfo::BranchProtectionInfo BPI;
- StringRef Error;
- (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
- BPI, Error);
- assert(Error.empty());
- Scope = BPI.SignReturnAddr;
- Key = BPI.SignKey;
- BranchTargetEnforcement = BPI.BranchTargetEnforcement;
- }
- }
+ const auto *TA = FD->getAttr<TargetAttr>();
+ if (TA == nullptr)
+ return;
+
+ ParsedTargetAttr Attr = TA->parse();
+ if (Attr.BranchProtection.empty())
+ return;
+
+ TargetInfo::BranchProtectionInfo BPI;
+ StringRef Error;
+ (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
+ BPI, Error);
+ assert(Error.empty());
auto *Fn = cast<llvm::Function>(GV);
- if (Scope != LangOptions::SignReturnAddressScopeKind::None) {
- Fn->addFnAttr("sign-return-address",
- Scope == LangOptions::SignReturnAddressScopeKind::All
- ? "all"
- : "non-leaf");
+ static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
+ Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
+ if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
Fn->addFnAttr("sign-return-address-key",
- Key == LangOptions::SignReturnAddressKeyKind::AKey
+ BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
? "a_key"
: "b_key");
}
- if (BranchTargetEnforcement)
- Fn->addFnAttr("branch-target-enforcement");
+ Fn->addFnAttr("branch-target-enforcement",
+ BPI.BranchTargetEnforcement ? "true" : "false");
}
};
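For orientation, a sketch of the trigger and the attributes the rewritten block emits (values follow the string tables above; note the code now returns early when no target attribute is present):

    // Sketch only: "pac-ret+leaf" signs all frames, the default key is a_key,
    // and "+bti" turns branch-target enforcement on; expected fn attributes:
    //   "sign-return-address"="all"
    //   "sign-return-address-key"="a_key"
    //   "branch-target-enforcement"="true"
    __attribute__((target("branch-protection=pac-ret+leaf+bti")))
    void protected_fn(void) {}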
@@ -5581,33 +5544,96 @@ void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
}
}
+ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
+ assert(Ty->isVectorType() && "expected vector type!");
+
+ const auto *VT = Ty->castAs<VectorType>();
+ if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+ assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
+ BuiltinType::UChar &&
+ "unexpected builtin type for SVE predicate!");
+ return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
+ llvm::Type::getInt1Ty(getVMContext()), 16));
+ }
+
+ if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+
+ const auto *BT = VT->getElementType()->castAs<BuiltinType>();
+ llvm::ScalableVectorType *ResType = nullptr;
+ switch (BT->getKind()) {
+ default:
+ llvm_unreachable("unexpected builtin type for SVE vector!");
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt8Ty(getVMContext()), 16);
+ break;
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt16Ty(getVMContext()), 8);
+ break;
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 4);
+ break;
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2);
+ break;
+ case BuiltinType::Half:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getHalfTy(getVMContext()), 8);
+ break;
+ case BuiltinType::Float:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getFloatTy(getVMContext()), 4);
+ break;
+ case BuiltinType::Double:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getDoubleTy(getVMContext()), 2);
+ break;
+ case BuiltinType::BFloat16:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getBFloatTy(getVMContext()), 8);
+ break;
+ }
+ return ABIArgInfo::getDirect(ResType);
+ }
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ // Android promotes <2 x i8> to i16, not i32
+ if (isAndroid() && (Size <= 16)) {
+ llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size <= 32) {
+ llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64) {
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 128) {
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
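A hedged usage example for the coercion above (requires an SVE target and -msve-vector-bits=512 for this typedef):

    // Sketch only: a fixed-length SVE vector is passed and returned as the
    // corresponding scalable type, here <vscale x 4 x i32>.
    #include <arm_sve.h>
    typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(512)));
    fixed_i32 id(fixed_i32 v) { return v; }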
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
// Handle illegal vector types here.
- if (isIllegalVectorType(Ty)) {
- uint64_t Size = getContext().getTypeSize(Ty);
- // Android promotes <2 x i8> to i16, not i32
- if (isAndroid() && (Size <= 16)) {
- llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size <= 32) {
- llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 64) {
- auto *ResType =
- llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 128) {
- auto *ResType =
- llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
- return ABIArgInfo::getDirect(ResType);
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
- }
+ if (isIllegalVectorType(Ty))
+ return coerceIllegalVector(Ty);
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
@@ -5686,6 +5712,12 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
+ if (const auto *VT = RetTy->getAs<VectorType>()) {
+ if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ return coerceIllegalVector(RetTy);
+ }
+
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
return getNaturalAlignIndirect(RetTy);
@@ -5741,6 +5773,13 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Check whether VT is a fixed-length SVE vector. These types are
+ // represented as scalable vectors in function args/return and must be
+ // coerced from fixed vectors.
+ if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ return true;
+
// Check whether VT is legal.
unsigned NumElements = VT->getNumElements();
uint64_t Size = getContext().getTypeSize(VT);
@@ -5933,13 +5972,13 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
Address Tmp = CGF.CreateTempAlloca(HFATy,
- std::max(TyAlign, BaseTyInfo.second));
+ std::max(TyAlign, BaseTyInfo.Align));
// On big-endian platforms, the value will be right-aligned in its slot.
int Offset = 0;
if (CGF.CGM.getDataLayout().isBigEndian() &&
- BaseTyInfo.first.getQuantity() < 16)
- Offset = 16 - BaseTyInfo.first.getQuantity();
+ BaseTyInfo.Width.getQuantity() < 16)
+ Offset = 16 - BaseTyInfo.Width.getQuantity();
for (unsigned i = 0; i < NumMembers; ++i) {
CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
@@ -6063,7 +6102,7 @@ Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
// Arguments bigger than 16 bytes which aren't homogeneous
// aggregates should be passed indirectly.
bool IsIndirect = false;
- if (TyInfo.first.getQuantity() > 16) {
+ if (TyInfo.Width.getQuantity() > 16) {
const Type *Base = nullptr;
uint64_t Members = 0;
IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
@@ -6825,7 +6864,7 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
TyAlignForABI = CharUnits::fromQuantity(4);
}
- std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
+ TypeInfoChars TyInfo(TySize, TyAlignForABI, false);
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
SlotSize, /*AllowHigherAlign*/ true);
}
@@ -7299,8 +7338,8 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
ArgTy = AI.getCoerceToType();
InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
IsVector = ArgTy->isVectorTy();
- UnpaddedSize = TyInfo.first;
- DirectAlign = TyInfo.second;
+ UnpaddedSize = TyInfo.Width;
+ DirectAlign = TyInfo.Align;
}
CharUnits PaddedSize = CharUnits::fromQuantity(8);
if (IsVector && UnpaddedSize > PaddedSize)
@@ -7321,7 +7360,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
Address OverflowArgArea =
Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- TyInfo.second);
+ TyInfo.Align);
Address MemAddr =
CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
@@ -7418,7 +7457,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
if (IsIndirect)
ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
- TyInfo.second);
+ TyInfo.Align);
return ResAddr;
}
@@ -7915,8 +7954,8 @@ Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// The alignment of things in the argument area is never larger than
// StackAlignInBytes.
- TyInfo.second =
- std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
+ TyInfo.Align =
+ std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
// MinABIStackAlignInBytes is the size of argument slots on the stack.
CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
@@ -8557,7 +8596,7 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
if (RAA == CGCXXABI::RAA_Indirect) {
return getIndirectResult(Ty, /*ByVal=*/false, State);
} else if (RAA == CGCXXABI::RAA_DirectInMemory) {
- return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
}
}
@@ -8629,35 +8668,9 @@ private:
bool isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const override;
- // Coerce HIP pointer arguments from generic pointers to global ones.
+ // Coerce HIP scalar pointer arguments from generic pointers to global ones.
llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
unsigned ToAS) const {
- // Structure types.
- if (auto STy = dyn_cast<llvm::StructType>(Ty)) {
- SmallVector<llvm::Type *, 8> EltTys;
- bool Changed = false;
- for (auto T : STy->elements()) {
- auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
- EltTys.push_back(NT);
- Changed |= (NT != T);
- }
- // Skip if there is no change in element types.
- if (!Changed)
- return STy;
- if (STy->hasName())
- return llvm::StructType::create(
- EltTys, (STy->getName() + ".coerce").str(), STy->isPacked());
- return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked());
- }
- // Array types.
- if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) {
- auto T = ATy->getElementType();
- auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
- // Skip if there is no change in that element type.
- if (NT == T)
- return ATy;
- return llvm::ArrayType::get(NT, ATy->getNumElements());
- }
// Single value types.
if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
return llvm::PointerType::get(
@@ -8792,18 +8805,31 @@ ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
// TODO: Can we omit empty structs?
- llvm::Type *LTy = nullptr;
if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- LTy = CGT.ConvertType(QualType(SeltTy, 0));
+ Ty = QualType(SeltTy, 0);
+ llvm::Type *OrigLTy = CGT.ConvertType(Ty);
+ llvm::Type *LTy = OrigLTy;
if (getContext().getLangOpts().HIP) {
- if (!LTy)
- LTy = CGT.ConvertType(Ty);
LTy = coerceKernelArgumentType(
- LTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
+ OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
/*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
}
+ // FIXME: Should also use this for OpenCL, but it requires addressing the
+ // problem of kernels being called.
+ //
+ // FIXME: This doesn't apply the optimization of coercing pointers in structs
+ // to global address space when using byref. This would require implementing a
+ // new kind of coercion of the in-memory type for indirect arguments.
+ if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
+ isAggregateTypeForABI(Ty)) {
+ return ABIArgInfo::getIndirectAliased(
+ getContext().getTypeAlignInChars(Ty),
+ getContext().getTargetAddressSpace(LangAS::opencl_constant),
+ false /*Realign*/, nullptr /*Padding*/);
+ }
+
// If we set CanBeFlattened to true, CodeGen will expand the struct to its
// individual elements, which confuses the Clover OpenCL backend; therefore we
// have to set it to false here. Other args of getDirect() are just defaults.
@@ -8972,13 +8998,9 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
assert(Max == 0 && "Max must be zero");
} else if (IsOpenCLKernel || IsHIPKernel) {
// By default, restrict the maximum size to a value specified by
- // --gpu-max-threads-per-block=n or its default value for HIP.
- const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
- const unsigned DefaultMaxWorkGroupSize =
- IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
- : M.getLangOpts().GPUMaxThreadsPerBlock;
+ // --gpu-max-threads-per-block=n or its default value.
std::string AttrVal =
- std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
+ std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
}
@@ -9014,6 +9036,9 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
if (NumVGPR != 0)
F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
}
+
+ if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
+ F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
@@ -9366,7 +9391,7 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
case ABIArgInfo::Extend: {
Stride = SlotSize;
- CharUnits Offset = SlotSize - TypeInfo.first;
+ CharUnits Offset = SlotSize - TypeInfo.Width;
ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
break;
}
@@ -9379,14 +9404,15 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
}
case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased:
Stride = SlotSize;
ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
- TypeInfo.second);
+ TypeInfo.Align);
break;
case ABIArgInfo::Ignore:
- return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
+ return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align);
}
// Update VAList.
@@ -9744,6 +9770,7 @@ Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
ArgSize = ArgSize.alignTo(SlotSize);
break;
case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased:
Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
Val = Address(Builder.CreateLoad(Val), TypeAlign);
ArgSize = SlotSize;
@@ -9901,14 +9928,27 @@ void XCoreTargetCodeGenInfo::emitTargetMetadata(
//===----------------------------------------------------------------------===//
namespace {
+class SPIRABIInfo : public DefaultABIInfo {
+public:
+ SPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
+
+private:
+ void setCCs();
+};
+} // end anonymous namespace
+namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<SPIRABIInfo>(CGT)) {}
unsigned getOpenCLKernelCallingConv() const override;
};
} // End anonymous namespace.
+void SPIRABIInfo::setCCs() {
+ assert(getRuntimeCC() == llvm::CallingConv::C);
+ RuntimeCC = llvm::CallingConv::SPIR_FUNC;
+}
namespace clang {
namespace CodeGen {
@@ -10318,7 +10358,7 @@ void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!IsRetIndirect && RetTy->isScalarType() &&
getContext().getTypeSize(RetTy) > (2 * XLen)) {
if (RetTy->isComplexType() && FLen) {
- QualType EltTy = RetTy->getAs<ComplexType>()->getElementType();
+ QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
} else {
// This is a normal scalar > 2*XLen, such as fp128 on RV32.
@@ -10390,7 +10430,6 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
return false;
Field1Ty = CGT.ConvertType(EltTy);
Field1Off = CurOff;
- assert(CurOff.isZero() && "Unexpected offset for first field");
Field2Ty = Field1Ty;
Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
return true;
@@ -10485,7 +10524,7 @@ bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
NeededArgFPRs++;
else if (Field2Ty)
NeededArgGPRs++;
- return IsCandidate;
+ return true;
}
// Call getCoerceAndExpand for the two-element flattened struct described by
@@ -10511,15 +10550,15 @@ ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
CharUnits Field2Align =
CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
- CharUnits Field1Size =
+ CharUnits Field1End = Field1Off +
CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
- CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);
+ CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
CharUnits Padding = CharUnits::Zero();
if (Field2Off > Field2OffNoPadNoPack)
Padding = Field2Off - Field2OffNoPadNoPack;
- else if (Field2Off != Field2Align && Field2Off > Field1Size)
- Padding = Field2Off - Field1Size;
+ else if (Field2Off != Field2Align && Field2Off > Field1End)
+ Padding = Field2Off - Field1End;
bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
@@ -10681,13 +10720,12 @@ Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
return Addr;
}
- std::pair<CharUnits, CharUnits> SizeAndAlign =
- getContext().getTypeInfoInChars(Ty);
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
// Arguments bigger than 2*Xlen bytes are passed indirectly.
- bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
+ bool IsIndirect = TInfo.Width > 2 * SlotSize;
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
SlotSize, /*AllowHigherAlign=*/true);
}
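A small example of the 2*XLen rule above (hedged; on RV64, XLen is 8 bytes, so this 24-byte struct takes the indirect path):

    // Sketch only: structs wider than 2*XLen are passed by reference in
    // varargs, so va_arg loads a pointer and then the value it points to.
    struct big { long a, b, c; }; // 24 bytes on RV64
    struct big take(__builtin_va_list ap) {
      return __builtin_va_arg(ap, struct big);
    }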
@@ -10745,21 +10783,24 @@ private:
} // end anonymous namespace
ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
- if (Ty->isAnyComplexType()) {
+ if (Ty->isAnyComplexType())
return ABIArgInfo::getDirect();
- }
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
return DefaultABIInfo::classifyReturnType(Ty);
}
ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
- if (Ty->isAnyComplexType()) {
+ if (Ty->isAnyComplexType())
return ABIArgInfo::getDirect();
- }
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
return DefaultABIInfo::classifyArgumentType(Ty);
}
void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
-
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &Arg : FI.arguments())
Arg.info = classifyArgumentType(Arg.type);
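Hedged illustration of the new Extend classification above (expected IR annotations inferred from ABIArgInfo::getExtend):

    // Sketch only: on VE, integer values narrower than 64 bits are now
    // extended at call boundaries for both parameters and return values.
    short h(short x);       // expect: signext on the i16 param and return
    unsigned u(unsigned x); // expect: zeroext on the i32 param and return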
@@ -10874,6 +10915,13 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(
new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
}
+ case llvm::Triple::ppcle: {
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
+ bool RetSmallStructInRegABI =
+ PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
+ return SetCGInfo(
+ new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
+ }
case llvm::Triple::ppc64:
if (Triple.isOSAIX())
return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));
@@ -10882,23 +10930,21 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
if (getTarget().getABI() == "elfv2")
Kind = PPC64_SVR4_ABIInfo::ELFv2;
- bool HasQPX = getTarget().getABI() == "elfv1-qpx";
bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
- return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
- IsSoftFloat));
+ return SetCGInfo(
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
}
return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::ppc64le: {
assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
- if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
+ if (getTarget().getABI() == "elfv1")
Kind = PPC64_SVR4_ABIInfo::ELFv1;
- bool HasQPX = getTarget().getABI() == "elfv1-qpx";
bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
- return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
- IsSoftFloat));
+ return SetCGInfo(
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
}
case llvm::Triple::nvptx:
@@ -11011,7 +11057,8 @@ TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::SmallVector<llvm::Value *, 2> Args;
for (auto &A : F->args())
Args.push_back(&A);
- Builder.CreateCall(Invoke, Args);
+ llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
+ call->setCallingConv(Invoke->getCallingConv());
Builder.CreateRetVoid();
Builder.restoreIP(IP);
return F;
@@ -11075,7 +11122,8 @@ llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
Args.push_back(Cast);
for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
Args.push_back(I);
- Builder.CreateCall(Invoke, Args);
+ llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
+ call->setCallingConv(Invoke->getCallingConv());
Builder.CreateRetVoid();
Builder.restoreIP(IP);
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index 1152cabce4a0..0df9667e91e1 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -163,11 +163,9 @@ public:
return "";
}
- /// Determine whether a call to objc_retainAutoreleasedReturnValue should be
- /// marked as 'notail'.
- virtual bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const {
- return false;
- }
+ /// Determine whether a call to objc_retainAutoreleasedReturnValue or
+ /// objc_unsafeClaimAutoreleasedReturnValue should be marked as 'notail'.
+ virtual bool markARCOptimizedReturnCallsAsNoTail() const { return false; }
/// Return a constant used by UBSan as a signature to identify functions
/// possessing type information, or 0 if the platform is unsupported.
diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp
index 80465c41d151..e27779f91abc 100644
--- a/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -157,13 +157,12 @@ parseCrossTUIndex(StringRef IndexPath) {
unsigned LineNo = 1;
while (std::getline(ExternalMapFile, Line)) {
StringRef LineRef{Line};
- const size_t Delimiter = LineRef.find(" ");
+ const size_t Delimiter = LineRef.find(' ');
if (Delimiter > 0 && Delimiter != std::string::npos) {
StringRef LookupName = LineRef.substr(0, Delimiter);
// Store paths with posix-style directory separator.
- SmallVector<char, 32> FilePath;
- llvm::Twine{LineRef.substr(Delimiter + 1)}.toVector(FilePath);
+ SmallString<32> FilePath(LineRef.substr(Delimiter + 1));
llvm::sys::path::native(FilePath, llvm::sys::path::Style::posix);
bool InsertionOccured;
@@ -624,15 +623,14 @@ parseInvocationList(StringRef FileContent, llvm::sys::path::Style PathStyle) {
return llvm::make_error<IndexError>(
index_error_code::invocation_list_wrong_format);
- SmallVector<char, 32> ValueStorage;
+ SmallString<32> ValueStorage;
StringRef SourcePath = Key->getValue(ValueStorage);
// Store paths with PathStyle directory separator.
- SmallVector<char, 32> NativeSourcePath;
- llvm::Twine{SourcePath}.toVector(NativeSourcePath);
+ SmallString<32> NativeSourcePath(SourcePath);
llvm::sys::path::native(NativeSourcePath, PathStyle);
- StringRef InvocationKey{NativeSourcePath.begin(), NativeSourcePath.size()};
+ StringRef InvocationKey(NativeSourcePath);
if (InvocationList.find(InvocationKey) != InvocationList.end())
return llvm::make_error<IndexError>(
diff --git a/clang/lib/Driver/Compilation.cpp b/clang/lib/Driver/Compilation.cpp
index 05ee5091396b..d33055739080 100644
--- a/clang/lib/Driver/Compilation.cpp
+++ b/clang/lib/Driver/Compilation.cpp
@@ -193,6 +193,8 @@ int Compilation::ExecuteCommand(const Command &C,
std::string Error;
bool ExecutionFailed;
int Res = C.Execute(Redirects, &Error, &ExecutionFailed);
+ if (PostCallback)
+ PostCallback(C, Res);
if (!Error.empty()) {
assert(Res && "Error string set with 0 result code!");
getDriver().Diag(diag::err_drv_command_failure) << Error;
diff --git a/clang/lib/Driver/Distro.cpp b/clang/lib/Driver/Distro.cpp
index 4d58ad1ae78c..ee4fe841e7ee 100644
--- a/clang/lib/Driver/Distro.cpp
+++ b/clang/lib/Driver/Distro.cpp
@@ -15,76 +15,107 @@
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Threading.h"
using namespace clang::driver;
using namespace clang;
-static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS,
- const llvm::Triple &TargetOrHost) {
- // If we don't target Linux, no need to check the distro. This saves a few
- // OS calls.
- if (!TargetOrHost.isOSLinux())
+static Distro::DistroType DetectOsRelease(llvm::vfs::FileSystem &VFS) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
+ VFS.getBufferForFile("/etc/os-release");
+ if (!File)
+ File = VFS.getBufferForFile("/usr/lib/os-release");
+ if (!File)
return Distro::UnknownDistro;
- // If the host is not running Linux, and we're backed by a real file system,
- // no need to check the distro. This is the case where someone is
- // cross-compiling from BSD or Windows to Linux, and it would be meaningless
- // to try to figure out the "distro" of the non-Linux host.
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS =
- llvm::vfs::getRealFileSystem();
- llvm::Triple HostTriple(llvm::sys::getProcessTriple());
- if (!HostTriple.isOSLinux() && &VFS == RealFS.get())
- return Distro::UnknownDistro;
+ SmallVector<StringRef, 16> Lines;
+ File.get()->getBuffer().split(Lines, "\n");
+ Distro::DistroType Version = Distro::UnknownDistro;
+
+ // This only inspects the ID= field; the parsing could be made more robust.
+ for (StringRef Line : Lines)
+ if (Version == Distro::UnknownDistro && Line.startswith("ID="))
+ Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(3))
+ .Case("fedora", Distro::Fedora)
+ .Case("gentoo", Distro::Gentoo)
+ .Case("arch", Distro::ArchLinux)
+ // On SLES, /etc/os-release was introduced in SLES 11.
+ .Case("sles", Distro::OpenSUSE)
+ .Case("opensuse", Distro::OpenSUSE)
+ .Case("exherbo", Distro::Exherbo)
+ .Default(Distro::UnknownDistro);
+ return Version;
+}
+static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
VFS.getBufferForFile("/etc/lsb-release");
- if (File) {
- StringRef Data = File.get()->getBuffer();
- SmallVector<StringRef, 16> Lines;
- Data.split(Lines, "\n");
- Distro::DistroType Version = Distro::UnknownDistro;
- for (StringRef Line : Lines)
- if (Version == Distro::UnknownDistro && Line.startswith("DISTRIB_CODENAME="))
- Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
- .Case("hardy", Distro::UbuntuHardy)
- .Case("intrepid", Distro::UbuntuIntrepid)
- .Case("jaunty", Distro::UbuntuJaunty)
- .Case("karmic", Distro::UbuntuKarmic)
- .Case("lucid", Distro::UbuntuLucid)
- .Case("maverick", Distro::UbuntuMaverick)
- .Case("natty", Distro::UbuntuNatty)
- .Case("oneiric", Distro::UbuntuOneiric)
- .Case("precise", Distro::UbuntuPrecise)
- .Case("quantal", Distro::UbuntuQuantal)
- .Case("raring", Distro::UbuntuRaring)
- .Case("saucy", Distro::UbuntuSaucy)
- .Case("trusty", Distro::UbuntuTrusty)
- .Case("utopic", Distro::UbuntuUtopic)
- .Case("vivid", Distro::UbuntuVivid)
- .Case("wily", Distro::UbuntuWily)
- .Case("xenial", Distro::UbuntuXenial)
- .Case("yakkety", Distro::UbuntuYakkety)
- .Case("zesty", Distro::UbuntuZesty)
- .Case("artful", Distro::UbuntuArtful)
- .Case("bionic", Distro::UbuntuBionic)
- .Case("cosmic", Distro::UbuntuCosmic)
- .Case("disco", Distro::UbuntuDisco)
- .Case("eoan", Distro::UbuntuEoan)
- .Case("focal", Distro::UbuntuFocal)
- .Case("groovy", Distro::UbuntuGroovy)
- .Default(Distro::UnknownDistro);
- if (Version != Distro::UnknownDistro)
- return Version;
- }
+ if (!File)
+ return Distro::UnknownDistro;
+
+ SmallVector<StringRef, 16> Lines;
+ File.get()->getBuffer().split(Lines, "\n");
+ Distro::DistroType Version = Distro::UnknownDistro;
+
+ for (StringRef Line : Lines)
+ if (Version == Distro::UnknownDistro &&
+ Line.startswith("DISTRIB_CODENAME="))
+ Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
+ .Case("hardy", Distro::UbuntuHardy)
+ .Case("intrepid", Distro::UbuntuIntrepid)
+ .Case("jaunty", Distro::UbuntuJaunty)
+ .Case("karmic", Distro::UbuntuKarmic)
+ .Case("lucid", Distro::UbuntuLucid)
+ .Case("maverick", Distro::UbuntuMaverick)
+ .Case("natty", Distro::UbuntuNatty)
+ .Case("oneiric", Distro::UbuntuOneiric)
+ .Case("precise", Distro::UbuntuPrecise)
+ .Case("quantal", Distro::UbuntuQuantal)
+ .Case("raring", Distro::UbuntuRaring)
+ .Case("saucy", Distro::UbuntuSaucy)
+ .Case("trusty", Distro::UbuntuTrusty)
+ .Case("utopic", Distro::UbuntuUtopic)
+ .Case("vivid", Distro::UbuntuVivid)
+ .Case("wily", Distro::UbuntuWily)
+ .Case("xenial", Distro::UbuntuXenial)
+ .Case("yakkety", Distro::UbuntuYakkety)
+ .Case("zesty", Distro::UbuntuZesty)
+ .Case("artful", Distro::UbuntuArtful)
+ .Case("bionic", Distro::UbuntuBionic)
+ .Case("cosmic", Distro::UbuntuCosmic)
+ .Case("disco", Distro::UbuntuDisco)
+ .Case("eoan", Distro::UbuntuEoan)
+ .Case("focal", Distro::UbuntuFocal)
+ .Case("groovy", Distro::UbuntuGroovy)
+ .Case("hirsute", Distro::UbuntuHirsute)
+ .Default(Distro::UnknownDistro);
+ return Version;
+}
+
+static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
+ Distro::DistroType Version = Distro::UnknownDistro;
+
+ // Newer freedesktop.org-compliant systemd-based systems
+ // should provide /etc/os-release or /usr/lib/os-release.
+ Version = DetectOsRelease(VFS);
+ if (Version != Distro::UnknownDistro)
+ return Version;
+
+ // Older systems might provide /etc/lsb-release.
+ Version = DetectLsbRelease(VFS);
+ if (Version != Distro::UnknownDistro)
+ return Version;
+
+ // Otherwise try some distro-specific quirks for RedHat...
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
+ VFS.getBufferForFile("/etc/redhat-release");
- File = VFS.getBufferForFile("/etc/redhat-release");
if (File) {
StringRef Data = File.get()->getBuffer();
if (Data.startswith("Fedora release"))
return Distro::Fedora;
if (Data.startswith("Red Hat Enterprise Linux") ||
- Data.startswith("CentOS") ||
- Data.startswith("Scientific Linux")) {
+ Data.startswith("CentOS") || Data.startswith("Scientific Linux")) {
if (Data.find("release 7") != StringRef::npos)
return Distro::RHEL7;
else if (Data.find("release 6") != StringRef::npos)
@@ -95,6 +126,7 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS,
return Distro::UnknownDistro;
}
+ // ...for Debian
File = VFS.getBufferForFile("/etc/debian_version");
if (File) {
StringRef Data = File.get()->getBuffer();
@@ -130,18 +162,20 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS,
.Default(Distro::UnknownDistro);
}
+ // ...for SUSE
File = VFS.getBufferForFile("/etc/SuSE-release");
if (File) {
StringRef Data = File.get()->getBuffer();
SmallVector<StringRef, 8> Lines;
Data.split(Lines, "\n");
- for (const StringRef& Line : Lines) {
+ for (const StringRef &Line : Lines) {
if (!Line.trim().startswith("VERSION"))
continue;
std::pair<StringRef, StringRef> SplitLine = Line.split('=');
// Old versions have split VERSION and PATCHLEVEL
// Newer versions use VERSION = x.y
- std::pair<StringRef, StringRef> SplitVer = SplitLine.second.trim().split('.');
+ std::pair<StringRef, StringRef> SplitVer =
+ SplitLine.second.trim().split('.');
int Version;
// OpenSUSE/SLES 10 and older are not supported and not compatible
@@ -153,6 +187,7 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS,
return Distro::UnknownDistro;
}
+ // ...and others.
if (VFS.exists("/etc/exherbo-release"))
return Distro::Exherbo;
@@ -168,5 +203,34 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS,
return Distro::UnknownDistro;
}
+static Distro::DistroType GetDistro(llvm::vfs::FileSystem &VFS,
+ const llvm::Triple &TargetOrHost) {
+ // If we don't target Linux, no need to check the distro. This saves a few
+ // OS calls.
+ if (!TargetOrHost.isOSLinux())
+ return Distro::UnknownDistro;
+
+ // True if we're backed by a real file system.
+ const bool onRealFS = (llvm::vfs::getRealFileSystem() == &VFS);
+
+ // If the host is not running Linux, and we're backed by a real file
+ // system, no need to check the distro. This is the case where someone
+ // is cross-compiling from BSD or Windows to Linux, and it would be
+ // meaningless to try to figure out the "distro" of the non-Linux host.
+ llvm::Triple HostTriple(llvm::sys::getProcessTriple());
+ if (!HostTriple.isOSLinux() && onRealFS)
+ return Distro::UnknownDistro;
+
+ if (onRealFS) {
+ // If we're backed by a real file system, perform
+ // the detection only once and save the result.
+ static Distro::DistroType LinuxDistro = DetectDistro(VFS);
+ return LinuxDistro;
+ }
+ // This is mostly for tests that use llvm::vfs::InMemoryFileSystem,
+ // which is not "real".
+ return DetectDistro(VFS);
+}
+
Distro::Distro(llvm::vfs::FileSystem &VFS, const llvm::Triple &TargetOrHost)
- : DistroVal(DetectDistro(VFS, TargetOrHost)) {}
+ : DistroVal(GetDistro(VFS, TargetOrHost)) {}
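The real-filesystem caching above leans on C++11 "magic statics"; a minimal standalone sketch of the idiom:

    // Sketch only: the initializer of a function-local static runs exactly
    // once, even with concurrent callers, so the detection result is cached.
    int expensiveDetect();
    inline int cachedDetect() {
      static int Result = expensiveDetect(); // computed on first call only
      return Result;
    }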
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index ece8222dcf24..418e1d3e8ec9 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -46,6 +46,8 @@
#include "ToolChains/VEToolchain.h"
#include "ToolChains/WebAssembly.h"
#include "ToolChains/XCore.h"
+#include "ToolChains/ZOS.h"
+#include "clang/Basic/TargetID.h"
#include "clang/Basic/Version.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
@@ -70,6 +72,7 @@
#include "llvm/Option/Option.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ExitCodes.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Host.h"
@@ -86,13 +89,17 @@
#include <utility>
#if LLVM_ON_UNIX
#include <unistd.h> // getpid
-#include <sysexits.h> // EX_IOERR
#endif
using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
+static llvm::Triple getHIPOffloadTargetTriple() {
+ static const llvm::Triple T("amdgcn-amd-amdhsa");
+ return T;
+}
+
// static
std::string Driver::GetResourcesPath(StringRef BinaryPath,
StringRef CustomResourceDir) {
@@ -121,12 +128,12 @@ std::string Driver::GetResourcesPath(StringRef BinaryPath,
}
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
- DiagnosticsEngine &Diags,
+ DiagnosticsEngine &Diags, std::string Title,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
: Diags(Diags), VFS(std::move(VFS)), Mode(GCCMode),
SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone), LTOMode(LTOK_None),
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
- DriverTitle("clang LLVM compiler"), CCPrintOptionsFilename(nullptr),
+ DriverTitle(Title), CCPrintOptionsFilename(nullptr),
CCPrintHeadersFilename(nullptr), CCLogDiagnosticsFilename(nullptr),
CCCPrintBindings(false), CCPrintOptions(false), CCPrintHeaders(false),
CCLogDiagnostics(false), CCGenDiagnostics(false),
@@ -204,6 +211,11 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
getIncludeExcludeOptionFlagMasks(IsClCompatMode);
+ // Make sure that Flang-only options don't pollute the Clang output
+ // TODO: Make sure that Clang-only options don't pollute Flang output
+ if (!IsFlangMode())
+ ExcludedFlagsBitmask |= options::FlangOnlyOption;
+
unsigned MissingArgIndex, MissingArgCount;
InputArgList Args =
getOpts().ParseArgs(ArgStrings, MissingArgIndex, MissingArgCount,
@@ -672,10 +684,8 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
} else if (IsHIP) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
const llvm::Triple &HostTriple = HostTC->getTriple();
- StringRef DeviceTripleStr;
auto OFK = Action::OFK_HIP;
- DeviceTripleStr = "amdgcn-amd-amdhsa";
- llvm::Triple HIPTriple(DeviceTripleStr);
+ llvm::Triple HIPTriple = getHIPOffloadTargetTriple();
// Use the HIP and host triples as the key into the ToolChains map,
// because the device toolchain we create depends on both.
auto &HIPTC = ToolChains[HIPTriple.str() + "/" + HostTriple.str()];
@@ -769,10 +779,9 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
/// by Dirs.
///
static bool searchForFile(SmallVectorImpl<char> &FilePath,
- ArrayRef<std::string> Dirs,
- StringRef FileName) {
+ ArrayRef<StringRef> Dirs, StringRef FileName) {
SmallString<128> WPath;
- for (const std::string &Dir : Dirs) {
+ for (const StringRef &Dir : Dirs) {
if (Dir.empty())
continue;
WPath.clear();
@@ -797,7 +806,7 @@ bool Driver::readConfigFile(StringRef FileName) {
// Read options from config file.
llvm::SmallString<128> CfgFileName(FileName);
llvm::sys::path::native(CfgFileName);
- ConfigFile = std::string(CfgFileName.str());
+ ConfigFile = std::string(CfgFileName);
bool ContainErrors;
CfgOptions = std::make_unique<InputArgList>(
ParseArgStrings(NewCfgArgs, IsCLMode(), ContainErrors));
@@ -854,9 +863,10 @@ bool Driver::loadConfigFile() {
std::vector<std::string> ConfigFiles =
CLOptions->getAllArgValues(options::OPT_config);
if (ConfigFiles.size() > 1) {
- if (!std::all_of(
- ConfigFiles.begin(), ConfigFiles.end(),
- [ConfigFiles](std::string s) { return s == ConfigFiles[0]; })) {
+ if (!std::all_of(ConfigFiles.begin(), ConfigFiles.end(),
+ [ConfigFiles](const std::string &s) {
+ return s == ConfigFiles[0];
+ })) {
Diag(diag::err_drv_duplicate_config);
return true;
}
@@ -929,10 +939,7 @@ bool Driver::loadConfigFile() {
}
// Prepare list of directories where config file is searched for.
- SmallVector<std::string, 3> CfgFileSearchDirs;
- CfgFileSearchDirs.push_back(UserConfigDir);
- CfgFileSearchDirs.push_back(SystemConfigDir);
- CfgFileSearchDirs.push_back(Dir);
+ StringRef CfgFileSearchDirs[] = {UserConfigDir, SystemConfigDir, Dir};
// Try to find config file. First try file with corrected architecture.
llvm::SmallString<128> CfgFilePath;
@@ -963,7 +970,7 @@ bool Driver::loadConfigFile() {
// --config. If it was deduced from executable name, it is not an error.
if (FileSpecifiedExplicitly) {
Diag(diag::err_drv_config_file_not_found) << CfgFileName;
- for (const std::string &SearchDir : CfgFileSearchDirs)
+ for (const StringRef &SearchDir : CfgFileSearchDirs)
if (!SearchDir.empty())
Diag(diag::note_drv_config_file_searched_in) << SearchDir;
return true;
@@ -978,17 +985,6 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// FIXME: Handle environment options which affect driver behavior, somewhere
// (client?). GCC_EXEC_PREFIX, LPATH, CC_PRINT_OPTIONS.
- if (Optional<std::string> CompilerPathValue =
- llvm::sys::Process::GetEnv("COMPILER_PATH")) {
- StringRef CompilerPath = *CompilerPathValue;
- while (!CompilerPath.empty()) {
- std::pair<StringRef, StringRef> Split =
- CompilerPath.split(llvm::sys::EnvPathSeparator);
- PrefixDirs.push_back(std::string(Split.first));
- CompilerPath = Split.second;
- }
- }
-
// We look for the driver mode option early, because the mode can affect
// how other options are parsed.
ParseDriverMode(ClangExecutable, ArgList.slice(1));
@@ -1013,13 +1009,15 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// objects than Args. This copies an Arg from one of those other InputArgLists
// to the ownership of Args.
auto appendOneArg = [&Args](const Arg *Opt, const Arg *BaseArg) {
- unsigned Index = Args.MakeIndex(Opt->getSpelling());
- Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Opt->getSpelling(),
- Index, BaseArg);
- Copy->getValues() = Opt->getValues();
- if (Opt->isClaimed())
- Copy->claim();
- Args.append(Copy);
+ unsigned Index = Args.MakeIndex(Opt->getSpelling());
+ Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Args.getArgString(Index),
+ Index, BaseArg);
+ Copy->getValues() = Opt->getValues();
+ if (Opt->isClaimed())
+ Copy->claim();
+ Copy->setOwnsValues(Opt->getOwnsValues());
+ Opt->setOwnsValues(false);
+ Args.append(Copy);
};
if (HasConfigFile)
@@ -1106,6 +1104,16 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
A->claim();
PrefixDirs.push_back(A->getValue(0));
}
+ if (Optional<std::string> CompilerPathValue =
+ llvm::sys::Process::GetEnv("COMPILER_PATH")) {
+ StringRef CompilerPath = *CompilerPathValue;
+ while (!CompilerPath.empty()) {
+ std::pair<StringRef, StringRef> Split =
+ CompilerPath.split(llvm::sys::EnvPathSeparator);
+ PrefixDirs.push_back(std::string(Split.first));
+ CompilerPath = Split.second;
+ }
+ }
if (const Arg *A = Args.getLastArg(options::OPT__sysroot_EQ))
SysRoot = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT__dyld_prefix_EQ))
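[Editor's note] For context on the relocated COMPILER_PATH handling above: the value is split on the platform's path-list separator and each component is appended to PrefixDirs, matching GCC's behavior. A minimal standalone sketch of that splitting, assuming a ':'-separated value as on Unix (all names here are illustrative, not the driver's):

#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// Split a PATH-like value into components, mirroring the loop above that
// feeds PrefixDirs. Empty components are preserved; the driver skips empty
// prefix directories later on its own.
static std::vector<std::string> splitEnvPath(const std::string &Value,
                                             char Separator = ':') {
  std::vector<std::string> Parts;
  std::string::size_type Start = 0;
  while (Start <= Value.size()) {
    std::string::size_type End = Value.find(Separator, Start);
    if (End == std::string::npos)
      End = Value.size();
    Parts.push_back(Value.substr(Start, End - Start));
    Start = End + 1;
  }
  return Parts;
}

int main() {
  if (const char *CompilerPath = std::getenv("COMPILER_PATH"))
    for (const std::string &Dir : splitEnvPath(CompilerPath))
      std::cout << "prefix dir: " << Dir << '\n';
}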
@@ -1567,6 +1575,11 @@ void Driver::PrintHelp(bool ShowHidden) const {
if (!ShowHidden)
ExcludedFlagsBitmask |= HelpHidden;
+ if (IsFlangMode())
+ IncludedFlagsBitmask |= options::FlangOption;
+ else
+ ExcludedFlagsBitmask |= options::FlangOnlyOption;
+
std::string Usage = llvm::formatv("{0} [options] file...", Name).str();
getOpts().PrintHelp(llvm::outs(), Usage.c_str(), DriverTitle.c_str(),
IncludedFlagsBitmask, ExcludedFlagsBitmask,
@@ -1574,9 +1587,13 @@ void Driver::PrintHelp(bool ShowHidden) const {
}
void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
- // FIXME: The following handlers should use a callback mechanism, we don't
- // know what the client would like to do.
- OS << getClangFullVersion() << '\n';
+ if (IsFlangMode()) {
+ OS << getClangToolFullVersion("flang-new") << '\n';
+ } else {
+ // FIXME: The following handlers should use a callback mechanism, we don't
+ // know what the client would like to do.
+ OS << getClangFullVersion() << '\n';
+ }
const ToolChain &TC = C.getDefaultToolChain();
OS << "Target: " << TC.getTripleString() << '\n';
@@ -1614,9 +1631,14 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
std::vector<std::string> SuggestedCompletions;
std::vector<std::string> Flags;
- unsigned short DisableFlags =
+ unsigned int DisableFlags =
options::NoDriverOption | options::Unsupported | options::Ignored;
+ // Make sure that Flang-only options don't pollute the Clang output
+ // TODO: Make sure that Clang-only options don't pollute Flang output
+ if (!IsFlangMode())
+ DisableFlags |= options::FlangOnlyOption;
+
// Distinguish "--autocomplete=-someflag" and "--autocomplete=-someflag,"
// because the latter indicates that the user put space before pushing tab
// which should end up in a file completion.
@@ -1749,6 +1771,13 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
if (C.getArgs().hasArg(options::OPT_print_search_dirs)) {
llvm::outs() << "programs: =";
bool separator = false;
+ // Print -B and COMPILER_PATH.
+ for (const std::string &Path : PrefixDirs) {
+ if (separator)
+ llvm::outs() << llvm::sys::EnvPathSeparator;
+ llvm::outs() << Path;
+ separator = true;
+ }
for (const std::string &Path : TC.getProgramPaths()) {
if (separator)
llvm::outs() << llvm::sys::EnvPathSeparator;
@@ -2067,7 +2096,7 @@ bool Driver::DiagnoseInputExistence(const DerivedArgList &Args, StringRef Value,
if (IsCLMode()) {
if (!llvm::sys::path::is_absolute(Twine(Value)) &&
- llvm::sys::Process::FindInEnvPath("LIB", Value))
+ llvm::sys::Process::FindInEnvPath("LIB", Value, ';'))
return true;
if (Args.hasArg(options::OPT__SLASH_link) && Ty == types::TY_Object) {
@@ -2384,8 +2413,20 @@ class OffloadingActionBuilder final {
bool EmitLLVM = false;
bool EmitAsm = false;
+ /// ID to identify each device compilation. For CUDA it is simply the
+ /// GPU arch string. For HIP it is either the GPU arch string or GPU
+  /// arch string plus feature strings delimited by a colon, e.g.
+  /// gfx906:xnack+.
+ struct TargetID {
+ /// Target ID string which is persistent throughout the compilation.
+ const char *ID;
+ TargetID(CudaArch Arch) { ID = CudaArchToString(Arch); }
+ TargetID(const char *ID) : ID(ID) {}
+ operator const char *() { return ID; }
+ operator StringRef() { return StringRef(ID); }
+ };
/// List of GPU architectures to use in this compilation.
- SmallVector<CudaArch, 4> GpuArchList;
+ SmallVector<TargetID, 4> GpuArchList;
/// The CUDA actions for the current input.
ActionList CudaDeviceActions;
@@ -2421,8 +2462,9 @@ class OffloadingActionBuilder final {
// If the host input is not CUDA or HIP, we don't need to bother about
// this input.
- if (IA->getType() != types::TY_CUDA &&
- IA->getType() != types::TY_HIP) {
+ if (!(IA->getType() == types::TY_CUDA ||
+ IA->getType() == types::TY_HIP ||
+ IA->getType() == types::TY_PP_HIP)) {
// The builder will ignore this input.
IsActive = false;
return ABRT_Inactive;
@@ -2450,7 +2492,7 @@ class OffloadingActionBuilder final {
// If -fgpu-rdc is disabled, should not unbundle since there is no
// device code to link.
- if (!Relocatable)
+ if (UA->getType() == types::TY_Object && !Relocatable)
return ABRT_Inactive;
CudaDeviceActions.clear();
@@ -2468,7 +2510,7 @@ class OffloadingActionBuilder final {
for (auto Arch : GpuArchList) {
CudaDeviceActions.push_back(UA);
- UA->registerDependentActionInfo(ToolChains[0], CudaArchToString(Arch),
+ UA->registerDependentActionInfo(ToolChains[0], Arch,
AssociatedOffloadKind);
}
return ABRT_Success;
@@ -2479,16 +2521,15 @@ class OffloadingActionBuilder final {
void appendTopLevelActions(ActionList &AL) override {
// Utility to append actions to the top level list.
- auto AddTopLevel = [&](Action *A, CudaArch BoundArch) {
+ auto AddTopLevel = [&](Action *A, TargetID TargetID) {
OffloadAction::DeviceDependences Dep;
- Dep.add(*A, *ToolChains.front(), CudaArchToString(BoundArch),
- AssociatedOffloadKind);
+ Dep.add(*A, *ToolChains.front(), TargetID, AssociatedOffloadKind);
AL.push_back(C.MakeAction<OffloadAction>(Dep, A->getType()));
};
// If we have a fat binary, add it to the list.
if (CudaFatBinary) {
- AddTopLevel(CudaFatBinary, CudaArch::UNKNOWN);
+ AddTopLevel(CudaFatBinary, CudaArch::UNUSED);
CudaDeviceActions.clear();
CudaFatBinary = nullptr;
return;
@@ -2510,6 +2551,13 @@ class OffloadingActionBuilder final {
CudaDeviceActions.clear();
}
+ /// Get canonicalized offload arch option. \returns empty StringRef if the
+ /// option is invalid.
+ virtual StringRef getCanonicalOffloadArch(StringRef Arch) = 0;
+
+ virtual llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+ getConflictOffloadArchCombination(const std::set<StringRef> &GpuArchs) = 0;
+
bool initialize() override {
assert(AssociatedOffloadKind == Action::OFK_Cuda ||
AssociatedOffloadKind == Action::OFK_HIP);
@@ -2557,7 +2605,7 @@ class OffloadingActionBuilder final {
EmitAsm = Args.getLastArg(options::OPT_S);
// Collect all cuda_gpu_arch parameters, removing duplicates.
- std::set<CudaArch> GpuArchs;
+ std::set<StringRef> GpuArchs;
bool Error = false;
for (Arg *A : Args) {
if (!(A->getOption().matches(options::OPT_offload_arch_EQ) ||
@@ -2565,27 +2613,35 @@ class OffloadingActionBuilder final {
continue;
A->claim();
- const StringRef ArchStr = A->getValue();
+ StringRef ArchStr = A->getValue();
if (A->getOption().matches(options::OPT_no_offload_arch_EQ) &&
ArchStr == "all") {
GpuArchs.clear();
continue;
}
- CudaArch Arch = StringToCudaArch(ArchStr);
- if (Arch == CudaArch::UNKNOWN) {
- C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
+ ArchStr = getCanonicalOffloadArch(ArchStr);
+ if (ArchStr.empty()) {
Error = true;
} else if (A->getOption().matches(options::OPT_offload_arch_EQ))
- GpuArchs.insert(Arch);
+ GpuArchs.insert(ArchStr);
else if (A->getOption().matches(options::OPT_no_offload_arch_EQ))
- GpuArchs.erase(Arch);
+ GpuArchs.erase(ArchStr);
else
llvm_unreachable("Unexpected option.");
}
+ auto &&ConflictingArchs = getConflictOffloadArchCombination(GpuArchs);
+ if (ConflictingArchs) {
+ C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo)
+ << ConflictingArchs.getValue().first
+ << ConflictingArchs.getValue().second;
+ C.setContainsError();
+ return true;
+ }
+
// Collect list of GPUs remaining in the set.
- for (CudaArch Arch : GpuArchs)
- GpuArchList.push_back(Arch);
+ for (auto Arch : GpuArchs)
+ GpuArchList.push_back(Arch.data());
// Default to sm_20 which is the lowest common denominator for
// supported GPUs. sm_20 code should work correctly, if
@@ -2607,6 +2663,21 @@ class OffloadingActionBuilder final {
DefaultCudaArch = CudaArch::SM_20;
}
+ StringRef getCanonicalOffloadArch(StringRef ArchStr) override {
+ CudaArch Arch = StringToCudaArch(ArchStr);
+ if (Arch == CudaArch::UNKNOWN) {
+ C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
+ return StringRef();
+ }
+ return CudaArchToString(Arch);
+ }
+
+ llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+ getConflictOffloadArchCombination(
+ const std::set<StringRef> &GpuArchs) override {
+ return llvm::None;
+ }
+
ActionBuilderReturnCode
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
@@ -2666,8 +2737,7 @@ class OffloadingActionBuilder final {
for (auto &A : {AssembleAction, BackendAction}) {
OffloadAction::DeviceDependences DDep;
- DDep.add(*A, *ToolChains.front(), CudaArchToString(GpuArchList[I]),
- Action::OFK_Cuda);
+ DDep.add(*A, *ToolChains.front(), GpuArchList[I], Action::OFK_Cuda);
DeviceActions.push_back(
C.MakeAction<OffloadAction>(DDep, A->getType()));
}
@@ -2726,6 +2796,25 @@ class OffloadingActionBuilder final {
bool canUseBundlerUnbundler() const override { return true; }
+ StringRef getCanonicalOffloadArch(StringRef IdStr) override {
+ llvm::StringMap<bool> Features;
+ auto ArchStr =
+ parseTargetID(getHIPOffloadTargetTriple(), IdStr, &Features);
+ if (!ArchStr) {
+ C.getDriver().Diag(clang::diag::err_drv_bad_target_id) << IdStr;
+ C.setContainsError();
+ return StringRef();
+ }
+ auto CanId = getCanonicalTargetID(ArchStr.getValue(), Features);
+ return Args.MakeArgStringRef(CanId);
+  }
+
+ llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+ getConflictOffloadArchCombination(
+ const std::set<StringRef> &GpuArchs) override {
+ return getConflictTargetIDCombination(GpuArchs);
+ }
+
ActionBuilderReturnCode
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
@@ -2770,8 +2859,8 @@ class OffloadingActionBuilder final {
// device arch of the next action being propagated to the above link
// action.
OffloadAction::DeviceDependences DDep;
- DDep.add(*CudaDeviceActions[I], *ToolChains.front(),
- CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ DDep.add(*CudaDeviceActions[I], *ToolChains.front(), GpuArchList[I],
+ AssociatedOffloadKind);
CudaDeviceActions[I] = C.MakeAction<OffloadAction>(
DDep, CudaDeviceActions[I]->getType());
}
@@ -2838,7 +2927,7 @@ class OffloadingActionBuilder final {
// LI contains all the inputs for the linker.
OffloadAction::DeviceDependences DeviceLinkDeps;
DeviceLinkDeps.add(*DeviceLinkAction, *ToolChains[0],
- CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ GpuArchList[I], AssociatedOffloadKind);
AL.push_back(C.MakeAction<OffloadAction>(DeviceLinkDeps,
DeviceLinkAction->getType()));
++I;
@@ -3164,7 +3253,8 @@ public:
// the input is not a bundle.
if (CanUseBundler && isa<InputAction>(HostAction) &&
InputArg->getOption().getKind() == llvm::opt::Option::InputClass &&
- !types::isSrcFile(HostAction->getType())) {
+ (!types::isSrcFile(HostAction->getType()) ||
+ HostAction->getType() == types::TY_PP_HIP)) {
auto UnbundlingHostAction =
C.MakeAction<OffloadUnbundlingJobAction>(HostAction);
UnbundlingHostAction->registerDependentActionInfo(
@@ -3799,9 +3889,15 @@ void Driver::BuildJobs(Compilation &C) const {
}
}
+ const llvm::Triple &RawTriple = C.getDefaultToolChain().getTriple();
+ if (RawTriple.isOSAIX())
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_G))
+ Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << RawTriple.str();
+
// Collect the list of architectures.
llvm::StringSet<> ArchNames;
- if (C.getDefaultToolChain().getTriple().isOSBinFormatMachO())
+ if (RawTriple.isOSBinFormatMachO())
for (const Arg *A : C.getArgs())
if (A->getOption().matches(options::OPT_arch))
ArchNames.insert(A->getValue());
@@ -3831,11 +3927,70 @@ void Driver::BuildJobs(Compilation &C) const {
/*TargetDeviceOffloadKind*/ Action::OFK_None);
}
- // If we have more than one job, then disable integrated-cc1 for now.
- if (C.getJobs().size() > 1)
+ StringRef StatReportFile;
+ bool PrintProcessStat = false;
+ if (const Arg *A = C.getArgs().getLastArg(options::OPT_fproc_stat_report_EQ))
+ StatReportFile = A->getValue();
+ if (C.getArgs().hasArg(options::OPT_fproc_stat_report))
+ PrintProcessStat = true;
+
+ // If we have more than one job, then disable integrated-cc1 for now. Do this
+ // also when we need to report process execution statistics.
+ if (C.getJobs().size() > 1 || !StatReportFile.empty() || PrintProcessStat)
for (auto &J : C.getJobs())
J.InProcess = false;
+ if (!StatReportFile.empty() || PrintProcessStat) {
+ C.setPostCallback([=](const Command &Cmd, int Res) {
+ Optional<llvm::sys::ProcessStatistics> ProcStat =
+ Cmd.getProcessStatistics();
+ if (!ProcStat)
+ return;
+ if (PrintProcessStat) {
+ using namespace llvm;
+ // Human readable output.
+ outs() << sys::path::filename(Cmd.getExecutable()) << ": "
+ << "output=";
+ if (Cmd.getOutputFilenames().empty())
+ outs() << "\"\"";
+ else
+ outs() << Cmd.getOutputFilenames().front();
+ outs() << ", total="
+ << format("%.3f", ProcStat->TotalTime.count() / 1000.) << " ms"
+ << ", user="
+ << format("%.3f", ProcStat->UserTime.count() / 1000.) << " ms"
+ << ", mem=" << ProcStat->PeakMemory << " Kb\n";
+ }
+ if (!StatReportFile.empty()) {
+ // CSV format.
+ std::string Buffer;
+ llvm::raw_string_ostream Out(Buffer);
+ llvm::sys::printArg(Out, llvm::sys::path::filename(Cmd.getExecutable()),
+ /*Quote*/ true);
+ Out << ',';
+ if (Cmd.getOutputFilenames().empty())
+ Out << "\"\"";
+ else
+ llvm::sys::printArg(Out, Cmd.getOutputFilenames().front(), true);
+ Out << ',' << ProcStat->TotalTime.count() << ','
+ << ProcStat->UserTime.count() << ',' << ProcStat->PeakMemory
+ << '\n';
+ Out.flush();
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(StatReportFile, EC, llvm::sys::fs::OF_Append);
+ if (EC)
+ return;
+ auto L = OS.lock();
+ if (!L) {
+ llvm::errs() << "ERROR: Cannot lock file " << StatReportFile << ": "
+ << toString(L.takeError()) << "\n";
+ return;
+ }
+ OS << Buffer;
+ }
+ });
+ }
+
// If the user passed -Qunused-arguments or there were errors, don't warn
// about any unused arguments.
if (Diags.hasErrorOccurred() ||
@@ -4527,11 +4682,29 @@ static const char *MakeCLOutputFilename(const ArgList &Args, StringRef ArgValue,
return Args.MakeArgString(Filename.c_str());
}
+static bool HasPreprocessOutput(const Action &JA) {
+ if (isa<PreprocessJobAction>(JA))
+ return true;
+ if (isa<OffloadAction>(JA) && isa<PreprocessJobAction>(JA.getInputs()[0]))
+ return true;
+ if (isa<OffloadBundlingJobAction>(JA) &&
+ HasPreprocessOutput(*(JA.getInputs()[0])))
+ return true;
+ return false;
+}
+
const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
const char *BaseInput,
- StringRef BoundArch, bool AtTopLevel,
+ StringRef OrigBoundArch, bool AtTopLevel,
bool MultipleArchs,
StringRef OffloadingPrefix) const {
+ std::string BoundArch = OrigBoundArch.str();
+#if defined(_WIN32)
+  // BoundArch may contain ':', which is invalid in file names on Windows,
+  // therefore replace it with '@'.
+ std::replace(BoundArch.begin(), BoundArch.end(), ':', '@');
+#endif
+
llvm::PrettyStackTraceString CrashInfo("Computing output path");
// Output to a user requested destination?
if (AtTopLevel && !isa<DsymutilJobAction>(JA) && !isa<VerifyJobAction>(JA)) {
@@ -4552,8 +4725,9 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
}
// Default to writing to stdout?
- if (AtTopLevel && !CCGenDiagnostics && isa<PreprocessJobAction>(JA))
+ if (AtTopLevel && !CCGenDiagnostics && HasPreprocessOutput(JA)) {
return "-";
+ }
// Is this the assembly listing for /FA?
if (JA.getType() == types::TY_PP_Asm &&
@@ -4595,10 +4769,20 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
}
SmallString<128> BasePath(BaseInput);
+ SmallString<128> ExternalPath("");
StringRef BaseName;
// Dsymutil actions should use the full path.
- if (isa<DsymutilJobAction>(JA) || isa<VerifyJobAction>(JA))
+ if (isa<DsymutilJobAction>(JA) && C.getArgs().hasArg(options::OPT_dsym_dir)) {
+ ExternalPath += C.getArgs().getLastArg(options::OPT_dsym_dir)->getValue();
+ // We use posix style here because the tests (specifically
+    // darwin-dsymutil.c) demonstrate that posix-style paths are acceptable
+    // even on Windows; without it, the similar test covering this behavior
+    // fails.
+ llvm::sys::path::append(ExternalPath, llvm::sys::path::Style::posix,
+ llvm::sys::path::filename(BasePath));
+ BaseName = ExternalPath;
+ } else if (isa<DsymutilJobAction>(JA) || isa<VerifyJobAction>(JA))
BaseName = BasePath;
else
BaseName = llvm::sys::path::filename(BasePath);
@@ -4786,8 +4970,7 @@ void Driver::generatePrefixedToolNames(
Names.emplace_back((DefaultTargetTriple + "-" + Tool).str());
}
-static bool ScanDirForExecutable(SmallString<128> &Dir,
- const std::string &Name) {
+static bool ScanDirForExecutable(SmallString<128> &Dir, StringRef Name) {
llvm::sys::path::append(Dir, Name);
if (llvm::sys::fs::can_execute(Twine(Dir)))
return true;
@@ -4804,9 +4987,8 @@ std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
for (const auto &PrefixDir : PrefixDirs) {
if (llvm::sys::fs::is_directory(PrefixDir)) {
SmallString<128> P(PrefixDir);
- for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
- if (ScanDirForExecutable(P, TargetSpecificExecutable))
- return std::string(P.str());
+ if (ScanDirForExecutable(P, Name))
+ return std::string(P.str());
} else {
SmallString<128> P((PrefixDir + Name).str());
if (llvm::sys::fs::can_execute(Twine(P)))
@@ -4931,9 +5113,7 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
!Target.hasEnvironment())
TC = std::make_unique<toolchains::MipsLLVMToolChain>(*this, Target,
Args);
- else if (Target.getArch() == llvm::Triple::ppc ||
- Target.getArch() == llvm::Triple::ppc64 ||
- Target.getArch() == llvm::Triple::ppc64le)
+ else if (Target.isPPC())
TC = std::make_unique<toolchains::PPCLinuxToolChain>(*this, Target,
Args);
else if (Target.getArch() == llvm::Triple::ve)
@@ -4996,6 +5176,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::Hurd:
TC = std::make_unique<toolchains::Hurd>(*this, Target, Args);
break;
+ case llvm::Triple::ZOS:
+ TC = std::make_unique<toolchains::ZOS>(*this, Target, Args);
+ break;
default:
// Of these targets, Hexagon is the only one that might have
// an OS of Linux, in which case it got handled above already.
@@ -5029,7 +5212,11 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
break;
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
- TC = std::make_unique<toolchains::RISCVToolChain>(*this, Target, Args);
+ if (toolchains::RISCVToolChain::hasGCCToolchain(*this, Args))
+ TC =
+ std::make_unique<toolchains::RISCVToolChain>(*this, Target, Args);
+ else
+ TC = std::make_unique<toolchains::BareMetal>(*this, Target, Args);
break;
case llvm::Triple::ve:
TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
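[Editor's note] The offload-arch changes in Driver.cpp above revolve around target IDs of the form arch[:feature+|feature-]*, e.g. gfx906:xnack+, which getCanonicalOffloadArch validates and canonicalizes via the helpers in clang/Basic/TargetID.h. As a rough standalone sketch of the format only (toy names; this is not the real parseTargetID):

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Parse "arch[:feature+|feature-]*" into an arch name plus a feature map,
// mirroring the shape of the target IDs handled above.
static std::optional<std::string>
parseToyTargetID(const std::string &ID, std::map<std::string, bool> &Features) {
  std::string::size_type Pos = ID.find(':');
  std::string Arch = ID.substr(0, Pos);
  if (Arch.empty())
    return std::nullopt;
  while (Pos != std::string::npos) {
    std::string::size_type Next = ID.find(':', Pos + 1);
    std::string Part = ID.substr(
        Pos + 1, Next == std::string::npos ? std::string::npos : Next - Pos - 1);
    if (Part.size() < 2 || (Part.back() != '+' && Part.back() != '-'))
      return std::nullopt; // malformed feature component
    Features[Part.substr(0, Part.size() - 1)] = Part.back() == '+';
    Pos = Next;
  }
  return Arch;
}

int main() {
  std::map<std::string, bool> Features;
  if (auto Arch = parseToyTargetID("gfx906:xnack+:sramecc-", Features)) {
    std::cout << "arch: " << *Arch << '\n';
    for (const auto &F : Features)
      std::cout << "  " << F.first << (F.second ? " on" : " off") << '\n';
  }
}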
diff --git a/clang/lib/Driver/Job.cpp b/clang/lib/Driver/Job.cpp
index 4808a9f4628d..911fd5df3ca2 100644
--- a/clang/lib/Driver/Job.cpp
+++ b/clang/lib/Driver/Job.cpp
@@ -38,12 +38,15 @@ using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
- ArrayRef<InputInfo> Inputs)
+ ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs)
: Source(Source), Creator(Creator), ResponseSupport(ResponseSupport),
Executable(Executable), Arguments(Arguments) {
for (const auto &II : Inputs)
if (II.isFilename())
InputFilenames.push_back(II.getFilename());
+ for (const auto &II : Outputs)
+ if (II.isFilename())
+ OutputFilenames.push_back(II.getFilename());
}
/// Check if the compiler flag in question should be skipped when
@@ -349,16 +352,17 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
auto Args = llvm::toStringRefArray(Argv.data());
return llvm::sys::ExecuteAndWait(Executable, Args, Env, Redirects,
- /*secondsToWait*/ 0,
- /*memoryLimit*/ 0, ErrMsg, ExecutionFailed);
+ /*secondsToWait*/ 0, /*memoryLimit*/ 0,
+ ErrMsg, ExecutionFailed, &ProcStat);
}
CC1Command::CC1Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport,
const char *Executable,
const llvm::opt::ArgStringList &Arguments,
- ArrayRef<InputInfo> Inputs)
- : Command(Source, Creator, ResponseSupport, Executable, Arguments, Inputs) {
+ ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs)
+ : Command(Source, Creator, ResponseSupport, Executable, Arguments, Inputs,
+ Outputs) {
InProcess = true;
}
@@ -415,9 +419,10 @@ FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
const char *Executable_,
const llvm::opt::ArgStringList &Arguments_,
ArrayRef<InputInfo> Inputs,
+ ArrayRef<InputInfo> Outputs,
std::unique_ptr<Command> Fallback_)
: Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
- Inputs),
+ Inputs, Outputs),
Fallback(std::move(Fallback_)) {}
void FallbackCommand::Print(raw_ostream &OS, const char *Terminator,
@@ -456,9 +461,10 @@ int FallbackCommand::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
ForceSuccessCommand::ForceSuccessCommand(
const Action &Source_, const Tool &Creator_,
ResponseFileSupport ResponseSupport, const char *Executable_,
- const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs)
+ const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs,
+ ArrayRef<InputInfo> Outputs)
: Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
- Inputs) {}
+ Inputs, Outputs) {}
void ForceSuccessCommand::Print(raw_ostream &OS, const char *Terminator,
bool Quote, CrashReportInfo *CrashInfo) const {
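[Editor's note] The Job.cpp changes above thread each job's outputs through every Command constructor so OutputFilenames is available after execution, e.g. for the -fproc-stat-report output assembled earlier in Driver.cpp. A stripped-down sketch of the pattern (toy types, not the driver's classes):

#include <iostream>
#include <string>
#include <vector>

// Stand-in for a driver InputInfo: either a filename or "nothing".
struct ToyInputInfo {
  std::string Filename; // empty means "not a file"
  bool isFilename() const { return !Filename.empty(); }
};

// Mirrors the constructor change above: inputs and outputs are both
// filtered down to their filenames and stored for later reporting.
struct ToyCommand {
  std::vector<std::string> InputFilenames, OutputFilenames;
  ToyCommand(const std::vector<ToyInputInfo> &Inputs,
             const std::vector<ToyInputInfo> &Outputs) {
    for (const auto &II : Inputs)
      if (II.isFilename())
        InputFilenames.push_back(II.Filename);
    for (const auto &II : Outputs)
      if (II.isFilename())
        OutputFilenames.push_back(II.Filename);
  }
};

int main() {
  ToyCommand Cmd({{"main.c"}}, {{"main.o"}});
  std::cout << "output="
            << (Cmd.OutputFilenames.empty() ? "\"\""
                                            : Cmd.OutputFilenames.front())
            << '\n';
}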
diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp
index bcc9ffc7ff8f..5c275353b679 100644
--- a/clang/lib/Driver/SanitizerArgs.cpp
+++ b/clang/lib/Driver/SanitizerArgs.cpp
@@ -60,8 +60,7 @@ static const SanitizerMask AlwaysRecoverable =
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress;
static const SanitizerMask NeedsLTO = SanitizerKind::CFI;
static const SanitizerMask TrappingSupported =
- (SanitizerKind::Undefined & ~SanitizerKind::Vptr) |
- SanitizerKind::UnsignedIntegerOverflow | SanitizerKind::ImplicitConversion |
+ (SanitizerKind::Undefined & ~SanitizerKind::Vptr) | SanitizerKind::Integer |
SanitizerKind::Nullability | SanitizerKind::LocalBounds |
SanitizerKind::CFI | SanitizerKind::FloatDivideByZero |
SanitizerKind::ObjCCast;
@@ -240,6 +239,10 @@ static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
return TrappingKinds;
}
+bool SanitizerArgs::needsFuzzerInterceptors() const {
+ return needsFuzzer() && !needsAsanRt() && !needsTsanRt() && !needsMsanRt();
+}
+
bool SanitizerArgs::needsUbsanRt() const {
// All of these include ubsan.
if (needsAsanRt() || needsMsanRt() || needsHwasanRt() || needsTsanRt() ||
@@ -491,8 +494,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
}
- if ((Kinds & SanitizerKind::ShadowCallStack) && TC.getTriple().isAArch64() &&
- !llvm::AArch64::isX18ReservedByDefault(TC.getTriple()) &&
+ if ((Kinds & SanitizerKind::ShadowCallStack) &&
+ ((TC.getTriple().isAArch64() &&
+ !llvm::AArch64::isX18ReservedByDefault(TC.getTriple())) ||
+ TC.getTriple().isRISCV()) &&
!Args.hasArg(options::OPT_ffixed_x18)) {
D.Diag(diag::err_drv_argument_only_allowed_with)
<< lastArgumentForMask(D, Args, Kinds & SanitizerKind::ShadowCallStack)
@@ -862,6 +867,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
LinkCXXRuntimes) ||
D.CCCIsCXX();
+ NeedsMemProfRt = Args.hasFlag(options::OPT_fmemory_profile,
+ options::OPT_fmemory_profile_EQ,
+ options::OPT_fno_memory_profile, false);
+
// Finally, initialize the set of available and recoverable sanitizers.
Sanitizers.Mask |= Kinds;
RecoverableSanitizers.Mask |= RecoverableKinds;
@@ -922,10 +931,10 @@ static bool hasTargetFeatureMTE(const llvm::opt::ArgStringList &CmdArgs) {
void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
types::ID InputType) const {
- // NVPTX doesn't currently support sanitizers. Bailing out here means that
- // e.g. -fsanitize=address applies only to host code, which is what we want
- // for now.
- if (TC.getTriple().isNVPTX())
+  // NVPTX and AMDGPU don't currently support sanitizers. Bailing out here
+  // means that e.g. -fsanitize=address applies only to host code, which is
+  // what we want for now.
+ if (TC.getTriple().isNVPTX() || TC.getTriple().isAMDGPU())
return;
// Translate available CoverageFeatures to corresponding clang-cc1 flags.
@@ -1084,6 +1093,23 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
Sanitizers.has(SanitizerKind::Address))
CmdArgs.push_back("-fno-assume-sane-operator-new");
+ // libFuzzer wants to intercept calls to certain library functions, so the
+ // following -fno-builtin-* flags force the compiler to emit interposable
+ // libcalls to these functions. Other sanitizers effectively do the same thing
+ // by marking all library call sites with NoBuiltin attribute in their LLVM
+ // pass. (see llvm::maybeMarkSanitizerLibraryCallNoBuiltin)
+ if (Sanitizers.has(SanitizerKind::FuzzerNoLink)) {
+ CmdArgs.push_back("-fno-builtin-bcmp");
+ CmdArgs.push_back("-fno-builtin-memcmp");
+ CmdArgs.push_back("-fno-builtin-strncmp");
+ CmdArgs.push_back("-fno-builtin-strcmp");
+ CmdArgs.push_back("-fno-builtin-strncasecmp");
+ CmdArgs.push_back("-fno-builtin-strcasecmp");
+ CmdArgs.push_back("-fno-builtin-strstr");
+ CmdArgs.push_back("-fno-builtin-strcasestr");
+ CmdArgs.push_back("-fno-builtin-memmem");
+ }
+
// Require -fvisibility= flag on non-Windows when compiling if vptr CFI is
// enabled.
if (Sanitizers.hasOneOf(CFIClasses) && !TC.getTriple().isOSWindows() &&
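[Editor's note] To make the libFuzzer block above concrete: each listed function gets a -fno-builtin-<name> flag so the optimizer cannot fold or inline the call, which keeps it as a real, interposable libcall that the fuzzer's interceptors can hook. A small sketch of assembling that flag list (illustrative names only):

#include <iostream>
#include <string>
#include <vector>

// Build the -fno-builtin-* flags for the comparison/search functions that
// libFuzzer intercepts, mirroring the block above.
static std::vector<std::string> fuzzerNoBuiltinFlags() {
  static const char *const Funcs[] = {
      "bcmp",        "memcmp",     "strncmp", "strcmp",    "strncasecmp",
      "strcasecmp",  "strstr",     "strcasestr", "memmem"};
  std::vector<std::string> Flags;
  for (const char *F : Funcs)
    Flags.push_back(std::string("-fno-builtin-") + F);
  return Flags;
}

int main() {
  for (const std::string &Flag : fuzzerNoBuiltinFlags())
    std::cout << Flag << '\n';
}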
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index b8c12fc9241a..b2ddef141a75 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -232,12 +232,17 @@ StringRef ToolChain::getDefaultUniversalArchName() const {
// the same as the ones that appear in the triple. Roughly speaking, this is
// an inverse of the darwin::getArchTypeForDarwinArchName() function.
switch (Triple.getArch()) {
- case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64: {
+ if (getTriple().isArm64e())
+ return "arm64e";
return "arm64";
+ }
case llvm::Triple::aarch64_32:
return "arm64_32";
case llvm::Triple::ppc:
return "ppc";
+ case llvm::Triple::ppcle:
+ return "ppcle";
case llvm::Triple::ppc64:
return "ppc64";
case llvm::Triple::ppc64le:
@@ -391,6 +396,8 @@ StringRef ToolChain::getOSLibName() const {
return "openbsd";
case llvm::Triple::Solaris:
return "sunos";
+ case llvm::Triple::AIX:
+ return "aix";
default:
return getOS();
}
@@ -546,19 +553,54 @@ std::string ToolChain::GetProgramPath(const char *Name) const {
return D.GetProgramPath(Name, *this);
}
-std::string ToolChain::GetLinkerPath() const {
+std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
+ bool *LinkerIsLLDDarwinNew) const {
+ if (LinkerIsLLD)
+ *LinkerIsLLD = false;
+ if (LinkerIsLLDDarwinNew)
+ *LinkerIsLLDDarwinNew = false;
+
+ // Get -fuse-ld= first to prevent -Wunused-command-line-argument. -fuse-ld= is
+  // interpreted as the linker flavor, e.g. "bfd", "gold", or "lld".
const Arg* A = Args.getLastArg(options::OPT_fuse_ld_EQ);
StringRef UseLinker = A ? A->getValue() : CLANG_DEFAULT_LINKER;
+ // --ld-path= takes precedence over -fuse-ld= and specifies the executable
+  // name. -B, COMPILER_PATH and PATH are consulted if the value does not
+ // contain a path component separator.
+ if (const Arg *A = Args.getLastArg(options::OPT_ld_path_EQ)) {
+ std::string Path(A->getValue());
+ if (!Path.empty()) {
+ if (llvm::sys::path::parent_path(Path).empty())
+ Path = GetProgramPath(A->getValue());
+ if (llvm::sys::fs::can_execute(Path))
+ return std::string(Path);
+ }
+ getDriver().Diag(diag::err_drv_invalid_linker_name) << A->getAsString(Args);
+ return GetProgramPath(getDefaultLinker());
+ }
+ // If we're passed -fuse-ld= with no argument, or with the argument ld,
+ // then use whatever the default system linker is.
+ if (UseLinker.empty() || UseLinker == "ld") {
+ const char *DefaultLinker = getDefaultLinker();
+ if (llvm::sys::path::is_absolute(DefaultLinker))
+ return std::string(DefaultLinker);
+ else
+ return GetProgramPath(DefaultLinker);
+ }
+
+ // Extending -fuse-ld= to an absolute or relative path is unexpected. Checking
+ // for the linker flavor is brittle. In addition, prepending "ld." or "ld64."
+ // to a relative path is surprising. This is more complex due to priorities
+ // among -B, COMPILER_PATH and PATH. --ld-path= should be used instead.
+ if (UseLinker.find('/') != StringRef::npos)
+ getDriver().Diag(diag::warn_drv_fuse_ld_path);
+
if (llvm::sys::path::is_absolute(UseLinker)) {
// If we're passed what looks like an absolute path, don't attempt to
// second-guess that.
if (llvm::sys::fs::can_execute(UseLinker))
return std::string(UseLinker);
- } else if (UseLinker.empty() || UseLinker == "ld") {
- // If we're passed -fuse-ld= with no argument, or with the argument ld,
- // then use whatever the default system linker is.
- return GetProgramPath(getDefaultLinker());
} else {
llvm::SmallString<8> LinkerName;
if (Triple.isOSDarwin())
@@ -568,8 +610,14 @@ std::string ToolChain::GetLinkerPath() const {
LinkerName.append(UseLinker);
std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
- if (llvm::sys::fs::can_execute(LinkerPath))
+ if (llvm::sys::fs::can_execute(LinkerPath)) {
+ // FIXME: Remove lld.darwinnew here once it's the only MachO lld.
+ if (LinkerIsLLD)
+ *LinkerIsLLD = UseLinker == "lld" || UseLinker == "lld.darwinnew";
+ if (LinkerIsLLDDarwinNew)
+ *LinkerIsLLDDarwinNew = UseLinker == "lld.darwinnew";
return LinkerPath;
+ }
}
if (A)
@@ -631,9 +679,7 @@ bool ToolChain::isThreadModelSupported(const StringRef Model) const {
return Triple.getArch() == llvm::Triple::arm ||
Triple.getArch() == llvm::Triple::armeb ||
Triple.getArch() == llvm::Triple::thumb ||
- Triple.getArch() == llvm::Triple::thumbeb ||
- Triple.getArch() == llvm::Triple::wasm32 ||
- Triple.getArch() == llvm::Triple::wasm64;
+ Triple.getArch() == llvm::Triple::thumbeb || Triple.isWasm();
} else if (Model == "posix")
return true;
@@ -665,6 +711,9 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
if (!Triple.isOSBinFormatMachO())
return getTripleString();
+ if (Triple.isArm64e())
+ return getTripleString();
+
// FIXME: older versions of ld64 expect the "arm64" component in the actual
// triple string and query it to determine whether an LTO file can be
// handled. Remove this when we don't care any more.
@@ -758,6 +807,37 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
}
Triple.setArchName(ArchName + Suffix.str());
+ bool isHardFloat =
+ (arm::getARMFloatABI(getDriver(), Triple, Args) == arm::FloatABI::Hard);
+ switch (Triple.getEnvironment()) {
+ case Triple::GNUEABI:
+ case Triple::GNUEABIHF:
+ Triple.setEnvironment(isHardFloat ? Triple::GNUEABIHF : Triple::GNUEABI);
+ break;
+ case Triple::EABI:
+ case Triple::EABIHF:
+ Triple.setEnvironment(isHardFloat ? Triple::EABIHF : Triple::EABI);
+ break;
+ case Triple::MuslEABI:
+ case Triple::MuslEABIHF:
+ Triple.setEnvironment(isHardFloat ? Triple::MuslEABIHF
+ : Triple::MuslEABI);
+ break;
+ default: {
+ arm::FloatABI DefaultABI = arm::getDefaultFloatABI(Triple);
+ if (DefaultABI != arm::FloatABI::Invalid &&
+ isHardFloat != (DefaultABI == arm::FloatABI::Hard)) {
+ Arg *ABIArg =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ);
+ assert(ABIArg && "Non-default float abi expected to be from arg");
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ABIArg->getAsString(Args) << Triple.getTriple();
+ }
+ break;
+ }
+ }
+
return Triple.getTriple();
}
}
@@ -989,23 +1069,23 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
// Return sanitizers which don't require runtime support and are not
// platform dependent.
- SanitizerMask Res = (SanitizerKind::Undefined & ~SanitizerKind::Vptr &
- ~SanitizerKind::Function) |
- (SanitizerKind::CFI & ~SanitizerKind::CFIICall) |
- SanitizerKind::CFICastStrict |
- SanitizerKind::FloatDivideByZero |
- SanitizerKind::UnsignedIntegerOverflow |
- SanitizerKind::ImplicitConversion |
- SanitizerKind::Nullability | SanitizerKind::LocalBounds;
+ SanitizerMask Res =
+ (SanitizerKind::Undefined & ~SanitizerKind::Vptr &
+ ~SanitizerKind::Function) |
+ (SanitizerKind::CFI & ~SanitizerKind::CFIICall) |
+ SanitizerKind::CFICastStrict | SanitizerKind::FloatDivideByZero |
+ SanitizerKind::UnsignedIntegerOverflow |
+ SanitizerKind::UnsignedShiftBase | SanitizerKind::ImplicitConversion |
+ SanitizerKind::Nullability | SanitizerKind::LocalBounds;
if (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64 ||
- getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::wasm32 ||
- getTriple().getArch() == llvm::Triple::wasm64 || getTriple().isAArch64())
+ getTriple().getArch() == llvm::Triple::arm || getTriple().isWasm() ||
+ getTriple().isAArch64())
Res |= SanitizerKind::CFIICall;
- if (getTriple().getArch() == llvm::Triple::x86_64 || getTriple().isAArch64())
+ if (getTriple().getArch() == llvm::Triple::x86_64 ||
+ getTriple().isAArch64(64) || getTriple().isRISCV())
Res |= SanitizerKind::ShadowCallStack;
- if (getTriple().isAArch64())
+ if (getTriple().isAArch64(64))
Res |= SanitizerKind::MemTag;
return Res;
}
@@ -1163,15 +1243,18 @@ void ToolChain::TranslateXarchArgs(
//
// We also want to disallow any options which would alter the
// driver behavior; that isn't going to work in our model. We
- // use isDriverOption() as an approximation, although things
- // like -O4 are going to slip through.
+ // use options::NoXarchOption to control this.
if (!XarchArg || Index > Prev + 1) {
getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
<< A->getAsString(Args);
return;
- } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
- << A->getAsString(Args);
+ } else if (XarchArg->getOption().hasFlag(options::NoXarchOption)) {
+ auto &Diags = getDriver().getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "invalid Xarch argument: '%0', not all driver "
+                              "options can be forwarded via Xarch argument");
+ Diags.Report(DiagID) << A->getAsString(Args);
return;
}
XarchArg->setBaseArg(A);
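[Editor's note] The GetLinkerPath rework above establishes a fixed precedence: --ld-path= names the executable directly (searched along the program path if it has no separator), -fuse-ld= with an empty value or "ld" selects the configured default linker, and any other -fuse-ld= value is treated as a flavor and expanded to ld.<flavor> (ld64.<flavor> on Darwin). A condensed sketch of that precedence under simplified assumptions (the helpers below are hypothetical stand-ins, not the toolchain API):

#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for the driver's program-path lookup.
static std::string getProgramPath(const std::string &Name) {
  return "/usr/bin/" + Name; // pretend PATH/-B/COMPILER_PATH search
}

// Condensed version of the precedence implemented above:
// 1. --ld-path= names the executable (searched if it has no separator).
// 2. -fuse-ld= empty or "ld" selects the default linker.
// 3. Otherwise -fuse-ld= names a flavor, expanded to "ld.<flavor>".
static std::string resolveLinker(std::optional<std::string> LdPath,
                                 std::string UseLinker,
                                 const std::string &DefaultLinker = "ld") {
  if (LdPath) {
    if (LdPath->find('/') == std::string::npos)
      return getProgramPath(*LdPath);
    return *LdPath;
  }
  if (UseLinker.empty() || UseLinker == "ld")
    return getProgramPath(DefaultLinker);
  return getProgramPath("ld." + UseLinker);
}

int main() {
  std::cout << resolveLinker(std::nullopt, "lld") << '\n';     // /usr/bin/ld.lld
  std::cout << resolveLinker(std::string("mold"), "") << '\n'; // /usr/bin/mold
}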
diff --git a/clang/lib/Driver/ToolChains/AIX.cpp b/clang/lib/Driver/ToolChains/AIX.cpp
index ac5544eedb00..36fe578fcb3d 100644
--- a/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/clang/lib/Driver/ToolChains/AIX.cpp
@@ -44,12 +44,6 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-a64");
}
- // Accept an undefined symbol as an extern so that an error message is not
- // displayed. Otherwise, undefined symbols are flagged with error messages.
- // FIXME: This should be removed when the assembly generation from the
- // compiler is able to write externs properly.
- CmdArgs.push_back("-u");
-
// Accept any mixture of instructions.
// On Power for AIX and Linux, this behaviour matches that of GCC for both the
// user-provided assembler source case and the compiler-produced assembler
@@ -77,7 +71,7 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -98,6 +92,12 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-bnso");
+ // Add options for shared libraries.
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-bM:SRE");
+ CmdArgs.push_back("-bnoentry");
+ }
+
// Specify linker output file.
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
@@ -129,16 +129,20 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
return IsArch32Bit ? "crt0.o" : "crt0_64.o";
};
- if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_shared)) {
CmdArgs.push_back(
Args.MakeArgString(ToolChain.GetFilePath(getCrt0Basename())));
+
+ CmdArgs.push_back(Args.MakeArgString(
+ ToolChain.GetFilePath(IsArch32Bit ? "crti.o" : "crti_64.o")));
}
- // Collect all static constructor and destructor functions in CXX mode. This
- // has to come before AddLinkerInputs as the implied option needs to precede
- // any other '-bcdtors' settings or '-bnocdtors' that '-Wl' might forward.
- if (D.CCCIsCXX())
- CmdArgs.push_back("-bcdtors:all:0:s");
+ // Collect all static constructor and destructor functions in both C and CXX
+ // language link invocations. This has to come before AddLinkerInputs as the
+ // implied option needs to precede any other '-bcdtors' settings or
+ // '-bnocdtors' that '-Wl' might forward.
+ CmdArgs.push_back("-bcdtors:all:0:s");
// Specify linker input file(s).
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -146,18 +150,27 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Add directory to library search path.
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
+
+ if (getToolChain().ShouldLinkCXXStdlib(Args))
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+
// Support POSIX threads if "-pthreads" or "-pthread" is present.
if (Args.hasArg(options::OPT_pthreads, options::OPT_pthread))
CmdArgs.push_back("-lpthreads");
+ if (D.CCCIsCXX())
+ CmdArgs.push_back("-lm");
+
CmdArgs.push_back("-lc");
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
/// AIX - AIX tool chain which can call as(1) and ld(1) directly.
@@ -203,6 +216,27 @@ void AIX::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, UP.str());
}
+void AIX::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ return;
+ case ToolChain::CST_Libstdcxx:
+ llvm::report_fatal_error("linking libstdc++ unimplemented on AIX");
+ }
+
+ llvm_unreachable("Unexpected C++ library type; only libc++ is supported.");
+}
+
+ToolChain::CXXStdlibType AIX::GetDefaultCXXStdlibType() const {
+ return ToolChain::CST_Libcxx;
+}
+
+ToolChain::RuntimeLibType AIX::GetDefaultRuntimeLibType() const {
+ return ToolChain::RLT_CompilerRT;
+}
+
auto AIX::buildAssembler() const -> Tool * { return new aix::Assembler(*this); }
auto AIX::buildLinker() const -> Tool * { return new aix::Linker(*this); }
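[Editor's note] Summarizing the AIX linker changes above: -shared now adds -bM:SRE and -bnoentry and suppresses the startup objects, while -bcdtors:all:0:s is passed for both C and C++ links. A toy assembly of those arguments (illustrative only; the real logic also honors -nostdlib, -nostartfiles, and the library flags):

#include <iostream>
#include <string>
#include <vector>

// Toy model of the AIX link-argument assembly above.
static std::vector<std::string> aixLinkArgs(bool Shared, bool Is64Bit) {
  std::vector<std::string> Args;
  if (Shared) {
    Args.push_back("-bM:SRE");   // build a shared object
    Args.push_back("-bnoentry"); // shared objects have no entry point
  } else {
    Args.push_back(Is64Bit ? "crt0_64.o" : "crt0.o");
    Args.push_back(Is64Bit ? "crti_64.o" : "crti.o");
  }
  Args.push_back("-bcdtors:all:0:s"); // collect static ctors/dtors
  return Args;
}

int main() {
  for (const std::string &A : aixLinkArgs(/*Shared=*/true, /*Is64Bit=*/false))
    std::cout << A << '\n';
}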
diff --git a/clang/lib/Driver/ToolChains/AIX.h b/clang/lib/Driver/ToolChains/AIX.h
index 942bb3cceb8a..d4e593255736 100644
--- a/clang/lib/Driver/ToolChains/AIX.h
+++ b/clang/lib/Driver/ToolChains/AIX.h
@@ -67,6 +67,13 @@ public:
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ CXXStdlibType GetDefaultCXXStdlibType() const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp
index bc6d1fcd4a00..0971a2da62a3 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -9,6 +9,7 @@
#include "AMDGPU.h"
#include "CommonArgs.h"
#include "InputInfo.h"
+#include "clang/Basic/TargetID.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "llvm/Option/ArgList.h"
@@ -87,23 +88,30 @@ void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
}
}
-void RocmInstallationDetector::ParseHIPVersionFile(llvm::StringRef V) {
+// Parse and extract version numbers from `.hipVersion`. Return `true` if
+// the parsing fails.
+bool RocmInstallationDetector::parseHIPVersionFile(llvm::StringRef V) {
SmallVector<StringRef, 4> VersionParts;
V.split(VersionParts, '\n');
- unsigned Major;
- unsigned Minor;
+ unsigned Major = ~0U;
+ unsigned Minor = ~0U;
for (auto Part : VersionParts) {
- auto Splits = Part.split('=');
- if (Splits.first == "HIP_VERSION_MAJOR")
- Splits.second.getAsInteger(0, Major);
- else if (Splits.first == "HIP_VERSION_MINOR")
- Splits.second.getAsInteger(0, Minor);
- else if (Splits.first == "HIP_VERSION_PATCH")
+ auto Splits = Part.rtrim().split('=');
+ if (Splits.first == "HIP_VERSION_MAJOR") {
+ if (Splits.second.getAsInteger(0, Major))
+ return true;
+ } else if (Splits.first == "HIP_VERSION_MINOR") {
+ if (Splits.second.getAsInteger(0, Minor))
+ return true;
+ } else if (Splits.first == "HIP_VERSION_PATCH")
VersionPatch = Splits.second.str();
}
+ if (Major == ~0U || Minor == ~0U)
+ return true;
VersionMajorMinor = llvm::VersionTuple(Major, Minor);
DetectedVersion =
(Twine(Major) + "." + Twine(Minor) + "." + VersionPatch).str();
+ return false;
}
// For candidate specified by --rocm-path we do not do strict check.
@@ -244,9 +252,9 @@ void RocmInstallationDetector::detectDeviceLibrary() {
// - ${ROCM_ROOT}/lib/*
// - ${ROCM_ROOT}/lib/bitcode/*
// so try to detect these layouts.
- static llvm::SmallVector<const char *, 2> SubDirsList[] = {
+ static constexpr std::array<const char *, 2> SubDirsList[] = {
{"amdgcn", "bitcode"},
- {"lib"},
+ {"lib", ""},
{"lib", "bitcode"},
};
@@ -289,7 +297,8 @@ void RocmInstallationDetector::detectHIPRuntime() {
continue;
if (HIPVersionArg.empty() && VersionFile)
- ParseHIPVersionFile((*VersionFile)->getBuffer());
+ if (parseHIPVersionFile((*VersionFile)->getBuffer()))
+ continue;
HasHIPRuntime = true;
return;
@@ -350,31 +359,45 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Linker = getToolChain().GetProgramPath(getShortName());
ArgStringList CmdArgs;
+ addLinkerCompressDebugSectionsOption(getToolChain(), Args, CmdArgs);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
CmdArgs.push_back("-shared");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(
- std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
- Args.MakeArgString(Linker), CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker),
+ CmdArgs, Inputs, Output));
}
void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features) {
- if (const Arg *dAbi = Args.getLastArg(options::OPT_mamdgpu_debugger_abi))
- D.Diag(diag::err_drv_clang_unsupported) << dAbi->getAsString(Args);
+ // Add target ID features to -target-feature options. No diagnostics should
+  // be emitted here, since an invalid target ID is diagnosed elsewhere.
+ StringRef TargetID = Args.getLastArgValue(options::OPT_mcpu_EQ);
+ if (!TargetID.empty()) {
+ llvm::StringMap<bool> FeatureMap;
+ auto OptionalGpuArch = parseTargetID(Triple, TargetID, &FeatureMap);
+ if (OptionalGpuArch) {
+ StringRef GpuArch = OptionalGpuArch.getValue();
+ // Iterate through all possible target ID features for the given GPU.
+ // If it is mapped to true, add +feature.
+ // If it is mapped to false, add -feature.
+      // If it is not in the map (default), do not add it.
+ for (auto &&Feature : getAllPossibleTargetIDFeatures(Triple, GpuArch)) {
+ auto Pos = FeatureMap.find(Feature);
+ if (Pos == FeatureMap.end())
+ continue;
+ Features.push_back(Args.MakeArgStringRef(
+ (Twine(Pos->second ? "+" : "-") + Feature).str()));
+ }
+ }
+ }
- if (Args.getLastArg(options::OPT_mwavefrontsize64)) {
- Features.push_back("-wavefrontsize16");
- Features.push_back("-wavefrontsize32");
+ if (Args.hasFlag(options::OPT_mwavefrontsize64,
+ options::OPT_mno_wavefrontsize64, false))
Features.push_back("+wavefrontsize64");
- }
- if (Args.getLastArg(options::OPT_mno_wavefrontsize64)) {
- Features.push_back("-wavefrontsize16");
- Features.push_back("+wavefrontsize32");
- Features.push_back("-wavefrontsize64");
- }
handleTargetFeaturesGroup(
Args, Features, options::OPT_m_amdgpu_Features_Group);
@@ -384,8 +407,14 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
AMDGPUToolChain::AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args),
- OptionsDefault({{options::OPT_O, "3"},
- {options::OPT_cl_std_EQ, "CL1.2"}}) {}
+ OptionsDefault(
+ {{options::OPT_O, "3"}, {options::OPT_cl_std_EQ, "CL1.2"}}) {
+ // Check code object version options. Emit warnings for legacy options
+  // and an error for the last invalid code object version option.
+  // This is done here to avoid repeated warning or error messages for
+ // each tool invocation.
+ (void)getOrCheckAMDGPUCodeObjectVersion(D, Args, /*Diagnose=*/true);
+}
Tool *AMDGPUToolChain::buildLinker() const {
return new tools::amdgpu::Linker(*this);
@@ -398,16 +427,20 @@ AMDGPUToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
DerivedArgList *DAL =
Generic_ELF::TranslateArgs(Args, BoundArch, DeviceOffloadKind);
- // Do nothing if not OpenCL (-x cl)
- if (!Args.getLastArgValue(options::OPT_x).equals("cl"))
- return DAL;
+ const OptTable &Opts = getDriver().getOpts();
if (!DAL)
DAL = new DerivedArgList(Args.getBaseArgs());
- for (auto *A : Args)
- DAL->append(A);
- const OptTable &Opts = getDriver().getOpts();
+ for (Arg *A : Args) {
+ if (!shouldSkipArgument(A))
+ DAL->append(A);
+ }
+
+ checkTargetID(*DAL);
+
+ if (!Args.getLastArgValue(options::OPT_x).equals("cl"))
+ return DAL;
// Phase 1 (.cl -> .bc)
if (Args.hasArg(options::OPT_c) && Args.hasArg(options::OPT_emit_llvm)) {
@@ -452,7 +485,8 @@ llvm::DenormalMode AMDGPUToolChain::getDefaultDenormalModeForType(
if (JA.getOffloadingDeviceKind() == Action::OFK_HIP ||
JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
- auto Kind = llvm::AMDGPU::parseArchAMDGCN(JA.getOffloadingArch());
+ auto Arch = getProcessorFromTargetID(getTriple(), JA.getOffloadingArch());
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(Arch);
if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
options::OPT_fno_cuda_flush_denormals_to_zero,
@@ -462,7 +496,7 @@ llvm::DenormalMode AMDGPUToolChain::getDefaultDenormalModeForType(
return llvm::DenormalMode::getIEEE();
}
- const StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ const StringRef GpuArch = getGPUArch(DriverArgs);
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
// TODO: There are way too many flags that change this. Do we need to check
@@ -479,7 +513,7 @@ llvm::DenormalMode AMDGPUToolChain::getDefaultDenormalModeForType(
bool AMDGPUToolChain::isWave64(const llvm::opt::ArgList &DriverArgs,
llvm::AMDGPU::GPUKind Kind) {
const unsigned ArchAttr = llvm::AMDGPU::getArchAttrAMDGCN(Kind);
- static bool HasWave32 = (ArchAttr & llvm::AMDGPU::FEATURE_WAVE32);
+ bool HasWave32 = (ArchAttr & llvm::AMDGPU::FEATURE_WAVE32);
return !HasWave32 || DriverArgs.hasFlag(
options::OPT_mwavefrontsize64, options::OPT_mno_wavefrontsize64, false);
@@ -507,6 +541,25 @@ void AMDGPUToolChain::addClangTargetOptions(
}
}
+StringRef
+AMDGPUToolChain::getGPUArch(const llvm::opt::ArgList &DriverArgs) const {
+ return getProcessorFromTargetID(
+ getTriple(), DriverArgs.getLastArgValue(options::OPT_mcpu_EQ));
+}
+
+void AMDGPUToolChain::checkTargetID(
+ const llvm::opt::ArgList &DriverArgs) const {
+ StringRef TargetID = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ if (TargetID.empty())
+ return;
+
+ llvm::StringMap<bool> FeatureMap;
+ auto OptionalGpuArch = parseTargetID(getTriple(), TargetID, &FeatureMap);
+ if (!OptionalGpuArch) {
+ getDriver().Diag(clang::diag::err_drv_bad_target_id) << TargetID;
+ }
+}
+
void ROCMToolChain::addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const {
@@ -528,7 +581,7 @@ void ROCMToolChain::addClangTargetOptions(
}
// Get the device name and canonicalize it
- const StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ const StringRef GpuArch = getGPUArch(DriverArgs);
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
@@ -594,3 +647,10 @@ void RocmInstallationDetector::addCommonBitcodeLibCC1Args(
CC1Args.push_back(LinkBitcodeFlag);
CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
}
+
+bool AMDGPUToolChain::shouldSkipArgument(const llvm::opt::Arg *A) const {
+ Option O = A->getOption();
+ if (O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie))
+ return true;
+ return false;
+}
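[Editor's note] getAMDGPUTargetFeatures above maps each feature parsed out of the target ID to an explicit +feature or -feature string, and deliberately emits nothing for features absent from the map so the hardware default applies. A standalone sketch of that mapping (toy names, not the clang API):

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Turn a target-ID feature map into explicit +feature/-feature strings,
// mirroring the loop above: features absent from the map keep their
// hardware defaults and produce no flag at all.
static std::vector<std::string>
featureFlags(const std::vector<std::string> &AllFeatures,
             const std::map<std::string, bool> &FeatureMap) {
  std::vector<std::string> Flags;
  for (const std::string &F : AllFeatures) {
    auto It = FeatureMap.find(F);
    if (It == FeatureMap.end())
      continue; // default: neither + nor -
    Flags.push_back((It->second ? "+" : "-") + F);
  }
  return Flags;
}

int main() {
  std::map<std::string, bool> FM = {{"xnack", true}, {"sramecc", false}};
  for (const std::string &Flag : featureFlags({"xnack", "sramecc"}, FM))
    std::cout << Flag << '\n';
}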
diff --git a/clang/lib/Driver/ToolChains/AMDGPU.h b/clang/lib/Driver/ToolChains/AMDGPU.h
index 5d44faf28b05..55ef6e01967e 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -11,6 +11,7 @@
#include "Gnu.h"
#include "ROCm.h"
+#include "clang/Basic/TargetID.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -36,7 +37,8 @@ public:
const char *LinkingOutput) const override;
};
-void getAMDGPUTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+void getAMDGPUTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features);
} // end namespace amdgpu
@@ -87,6 +89,16 @@ public:
/// Needed for translating LTO options.
const char *getDefaultLinker() const override { return "ld.lld"; }
+
+  /// Return true if the given argument should be skipped for this toolchain.
+ bool shouldSkipArgument(const llvm::opt::Arg *Arg) const;
+
+protected:
+ /// Check and diagnose invalid target ID specified by -mcpu.
+ void checkTargetID(const llvm::opt::ArgList &DriverArgs) const;
+
+ /// Get GPU arch from -mcpu without checking.
+ StringRef getGPUArch(const llvm::opt::ArgList &DriverArgs) const;
};
class LLVM_LIBRARY_VISIBILITY ROCMToolChain : public AMDGPUToolChain {
diff --git a/clang/lib/Driver/ToolChains/AVR.cpp b/clang/lib/Driver/ToolChains/AVR.cpp
index 092bade53c63..ae56b7b5249e 100644
--- a/clang/lib/Driver/ToolChains/AVR.cpp
+++ b/clang/lib/Driver/ToolChains/AVR.cpp
@@ -13,6 +13,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
@@ -27,13 +28,272 @@ using namespace llvm::opt;
namespace {
-// TODO: Consider merging this into the AVR device table
-// array in Targets/AVR.cpp.
-llvm::Optional<StringRef> GetMcuFamilyName(StringRef MCU) {
- return llvm::StringSwitch<llvm::Optional<StringRef>>(MCU)
- .Case("atmega328", Optional<StringRef>("avr5"))
- .Case("atmega328p", Optional<StringRef>("avr5"))
- .Default(Optional<StringRef>());
+const struct {
+ StringRef Name;
+ std::string SubPath;
+ StringRef Family;
+} MCUInfo[] = {
+ {"at90s1200", "", "avr1"},
+ {"attiny11", "", "avr1"},
+ {"attiny12", "", "avr1"},
+ {"attiny15", "", "avr1"},
+ {"attiny28", "", "avr1"},
+ {"at90s2313", "tiny-stack", "avr2"},
+ {"at90s2323", "tiny-stack", "avr2"},
+ {"at90s2333", "tiny-stack", "avr2"},
+ {"at90s2343", "tiny-stack", "avr2"},
+ {"at90s4433", "tiny-stack", "avr2"},
+ {"attiny22", "tiny-stack", "avr2"},
+ {"attiny26", "tiny-stack", "avr2"},
+ {"at90s4414", "", "avr2"},
+ {"at90s4434", "", "avr2"},
+ {"at90s8515", "", "avr2"},
+ {"at90c8534", "", "avr2"},
+ {"at90s8535", "", "avr2"},
+ {"attiny13", "avr25/tiny-stack", "avr25"},
+ {"attiny13a", "avr25/tiny-stack", "avr25"},
+ {"attiny2313", "avr25/tiny-stack", "avr25"},
+ {"attiny2313a", "avr25/tiny-stack", "avr25"},
+ {"attiny24", "avr25/tiny-stack", "avr25"},
+ {"attiny24a", "avr25/tiny-stack", "avr25"},
+ {"attiny25", "avr25/tiny-stack", "avr25"},
+ {"attiny261", "avr25/tiny-stack", "avr25"},
+ {"attiny261a", "avr25/tiny-stack", "avr25"},
+ {"at86rf401", "avr25", "avr25"},
+ {"ata5272", "avr25", "avr25"},
+ {"attiny4313", "avr25", "avr25"},
+ {"attiny44", "avr25", "avr25"},
+ {"attiny44a", "avr25", "avr25"},
+ {"attiny84", "avr25", "avr25"},
+ {"attiny84a", "avr25", "avr25"},
+ {"attiny45", "avr25", "avr25"},
+ {"attiny85", "avr25", "avr25"},
+ {"attiny441", "avr25", "avr25"},
+ {"attiny461", "avr25", "avr25"},
+ {"attiny461a", "avr25", "avr25"},
+ {"attiny841", "avr25", "avr25"},
+ {"attiny861", "avr25", "avr25"},
+ {"attiny861a", "avr25", "avr25"},
+ {"attiny87", "avr25", "avr25"},
+ {"attiny43u", "avr25", "avr25"},
+ {"attiny48", "avr25", "avr25"},
+ {"attiny88", "avr25", "avr25"},
+ {"attiny828", "avr25", "avr25"},
+ {"at43usb355", "avr3", "avr3"},
+ {"at76c711", "avr3", "avr3"},
+ {"atmega103", "avr31", "avr31"},
+ {"at43usb320", "avr31", "avr31"},
+ {"attiny167", "avr35", "avr35"},
+ {"at90usb82", "avr35", "avr35"},
+ {"at90usb162", "avr35", "avr35"},
+ {"ata5505", "avr35", "avr35"},
+ {"atmega8u2", "avr35", "avr35"},
+ {"atmega16u2", "avr35", "avr35"},
+ {"atmega32u2", "avr35", "avr35"},
+ {"attiny1634", "avr35", "avr35"},
+ {"atmega8", "avr4", "avr4"},
+ {"ata6289", "avr4", "avr4"},
+ {"atmega8a", "avr4", "avr4"},
+ {"ata6285", "avr4", "avr4"},
+ {"ata6286", "avr4", "avr4"},
+ {"atmega48", "avr4", "avr4"},
+ {"atmega48a", "avr4", "avr4"},
+ {"atmega48pa", "avr4", "avr4"},
+ {"atmega48pb", "avr4", "avr4"},
+ {"atmega48p", "avr4", "avr4"},
+ {"atmega88", "avr4", "avr4"},
+ {"atmega88a", "avr4", "avr4"},
+ {"atmega88p", "avr4", "avr4"},
+ {"atmega88pa", "avr4", "avr4"},
+ {"atmega88pb", "avr4", "avr4"},
+ {"atmega8515", "avr4", "avr4"},
+ {"atmega8535", "avr4", "avr4"},
+ {"atmega8hva", "avr4", "avr4"},
+ {"at90pwm1", "avr4", "avr4"},
+ {"at90pwm2", "avr4", "avr4"},
+ {"at90pwm2b", "avr4", "avr4"},
+ {"at90pwm3", "avr4", "avr4"},
+ {"at90pwm3b", "avr4", "avr4"},
+ {"at90pwm81", "avr4", "avr4"},
+ {"ata5790", "avr5", "avr5"},
+ {"ata5795", "avr5", "avr5"},
+ {"atmega16", "avr5", "avr5"},
+ {"atmega16a", "avr5", "avr5"},
+ {"atmega161", "avr5", "avr5"},
+ {"atmega162", "avr5", "avr5"},
+ {"atmega163", "avr5", "avr5"},
+ {"atmega164a", "avr5", "avr5"},
+ {"atmega164p", "avr5", "avr5"},
+ {"atmega164pa", "avr5", "avr5"},
+ {"atmega165", "avr5", "avr5"},
+ {"atmega165a", "avr5", "avr5"},
+ {"atmega165p", "avr5", "avr5"},
+ {"atmega165pa", "avr5", "avr5"},
+ {"atmega168", "avr5", "avr5"},
+ {"atmega168a", "avr5", "avr5"},
+ {"atmega168p", "avr5", "avr5"},
+ {"atmega168pa", "avr5", "avr5"},
+ {"atmega168pb", "avr5", "avr5"},
+ {"atmega169", "avr5", "avr5"},
+ {"atmega169a", "avr5", "avr5"},
+ {"atmega169p", "avr5", "avr5"},
+ {"atmega169pa", "avr5", "avr5"},
+ {"atmega32", "avr5", "avr5"},
+ {"atmega32a", "avr5", "avr5"},
+ {"atmega323", "avr5", "avr5"},
+ {"atmega324a", "avr5", "avr5"},
+ {"atmega324p", "avr5", "avr5"},
+ {"atmega324pa", "avr5", "avr5"},
+ {"atmega325", "avr5", "avr5"},
+ {"atmega325a", "avr5", "avr5"},
+ {"atmega325p", "avr5", "avr5"},
+ {"atmega325pa", "avr5", "avr5"},
+ {"atmega3250", "avr5", "avr5"},
+ {"atmega3250a", "avr5", "avr5"},
+ {"atmega3250p", "avr5", "avr5"},
+ {"atmega3250pa", "avr5", "avr5"},
+ {"atmega328", "avr5", "avr5"},
+ {"atmega328p", "avr5", "avr5"},
+ {"atmega329", "avr5", "avr5"},
+ {"atmega329a", "avr5", "avr5"},
+ {"atmega329p", "avr5", "avr5"},
+ {"atmega329pa", "avr5", "avr5"},
+ {"atmega3290", "avr5", "avr5"},
+ {"atmega3290a", "avr5", "avr5"},
+ {"atmega3290p", "avr5", "avr5"},
+ {"atmega3290pa", "avr5", "avr5"},
+ {"atmega406", "avr5", "avr5"},
+ {"atmega64", "avr5", "avr5"},
+ {"atmega64a", "avr5", "avr5"},
+ {"atmega640", "avr5", "avr5"},
+ {"atmega644", "avr5", "avr5"},
+ {"atmega644a", "avr5", "avr5"},
+ {"atmega644p", "avr5", "avr5"},
+ {"atmega644pa", "avr5", "avr5"},
+ {"atmega645", "avr5", "avr5"},
+ {"atmega645a", "avr5", "avr5"},
+ {"atmega645p", "avr5", "avr5"},
+ {"atmega649", "avr5", "avr5"},
+ {"atmega649a", "avr5", "avr5"},
+ {"atmega649p", "avr5", "avr5"},
+ {"atmega6450", "avr5", "avr5"},
+ {"atmega6450a", "avr5", "avr5"},
+ {"atmega6450p", "avr5", "avr5"},
+ {"atmega6490", "avr5", "avr5"},
+ {"atmega6490a", "avr5", "avr5"},
+ {"atmega6490p", "avr5", "avr5"},
+ {"atmega64rfr2", "avr5", "avr5"},
+ {"atmega644rfr2", "avr5", "avr5"},
+ {"atmega16hva", "avr5", "avr5"},
+ {"atmega16hva2", "avr5", "avr5"},
+ {"atmega16hvb", "avr5", "avr5"},
+ {"atmega16hvbrevb", "avr5", "avr5"},
+ {"atmega32hvb", "avr5", "avr5"},
+ {"atmega32hvbrevb", "avr5", "avr5"},
+ {"atmega64hve", "avr5", "avr5"},
+ {"at90can32", "avr5", "avr5"},
+ {"at90can64", "avr5", "avr5"},
+ {"at90pwm161", "avr5", "avr5"},
+ {"at90pwm216", "avr5", "avr5"},
+ {"at90pwm316", "avr5", "avr5"},
+ {"atmega32c1", "avr5", "avr5"},
+ {"atmega64c1", "avr5", "avr5"},
+ {"atmega16m1", "avr5", "avr5"},
+ {"atmega32m1", "avr5", "avr5"},
+ {"atmega64m1", "avr5", "avr5"},
+ {"atmega16u4", "avr5", "avr5"},
+ {"atmega32u4", "avr5", "avr5"},
+ {"atmega32u6", "avr5", "avr5"},
+ {"at90usb646", "avr5", "avr5"},
+ {"at90usb647", "avr5", "avr5"},
+ {"at90scr100", "avr5", "avr5"},
+ {"at94k", "avr5", "avr5"},
+ {"m3000", "avr5", "avr5"},
+ {"atmega128", "avr51", "avr51"},
+ {"atmega128a", "avr51", "avr51"},
+ {"atmega1280", "avr51", "avr51"},
+ {"atmega1281", "avr51", "avr51"},
+ {"atmega1284", "avr51", "avr51"},
+ {"atmega1284p", "avr51", "avr51"},
+ {"atmega128rfa1", "avr51", "avr51"},
+ {"atmega128rfr2", "avr51", "avr51"},
+ {"atmega1284rfr2", "avr51", "avr51"},
+ {"at90can128", "avr51", "avr51"},
+ {"at90usb1286", "avr51", "avr51"},
+ {"at90usb1287", "avr51", "avr51"},
+ {"atmega2560", "avr6", "avr6"},
+ {"atmega2561", "avr6", "avr6"},
+ {"atmega256rfr2", "avr6", "avr6"},
+ {"atmega2564rfr2", "avr6", "avr6"},
+ {"attiny4", "avrtiny", "avrtiny"},
+ {"attiny5", "avrtiny", "avrtiny"},
+ {"attiny9", "avrtiny", "avrtiny"},
+ {"attiny10", "avrtiny", "avrtiny"},
+ {"attiny20", "avrtiny", "avrtiny"},
+ {"attiny40", "avrtiny", "avrtiny"},
+ {"atxmega16a4", "avrxmega2", "avrxmega2"},
+ {"atxmega16a4u", "avrxmega2", "avrxmega2"},
+ {"atxmega16c4", "avrxmega2", "avrxmega2"},
+ {"atxmega16d4", "avrxmega2", "avrxmega2"},
+ {"atxmega32a4", "avrxmega2", "avrxmega2"},
+ {"atxmega32a4u", "avrxmega2", "avrxmega2"},
+ {"atxmega32c4", "avrxmega2", "avrxmega2"},
+ {"atxmega32d4", "avrxmega2", "avrxmega2"},
+ {"atxmega32e5", "avrxmega2", "avrxmega2"},
+ {"atxmega16e5", "avrxmega2", "avrxmega2"},
+ {"atxmega8e5", "avrxmega2", "avrxmega2"},
+ {"atxmega64a3u", "avrxmega4", "avrxmega4"},
+ {"atxmega64a4u", "avrxmega4", "avrxmega4"},
+ {"atxmega64b1", "avrxmega4", "avrxmega4"},
+ {"atxmega64b3", "avrxmega4", "avrxmega4"},
+ {"atxmega64c3", "avrxmega4", "avrxmega4"},
+ {"atxmega64d3", "avrxmega4", "avrxmega4"},
+ {"atxmega64d4", "avrxmega4", "avrxmega4"},
+ {"atxmega64a1", "avrxmega5", "avrxmega5"},
+ {"atxmega64a1u", "avrxmega5", "avrxmega5"},
+ {"atxmega128a3", "avrxmega6", "avrxmega6"},
+ {"atxmega128a3u", "avrxmega6", "avrxmega6"},
+ {"atxmega128b1", "avrxmega6", "avrxmega6"},
+ {"atxmega128b3", "avrxmega6", "avrxmega6"},
+ {"atxmega128c3", "avrxmega6", "avrxmega6"},
+ {"atxmega128d3", "avrxmega6", "avrxmega6"},
+ {"atxmega128d4", "avrxmega6", "avrxmega6"},
+ {"atxmega192a3", "avrxmega6", "avrxmega6"},
+ {"atxmega192a3u", "avrxmega6", "avrxmega6"},
+ {"atxmega192c3", "avrxmega6", "avrxmega6"},
+ {"atxmega192d3", "avrxmega6", "avrxmega6"},
+ {"atxmega256a3", "avrxmega6", "avrxmega6"},
+ {"atxmega256a3u", "avrxmega6", "avrxmega6"},
+ {"atxmega256a3b", "avrxmega6", "avrxmega6"},
+ {"atxmega256a3bu", "avrxmega6", "avrxmega6"},
+ {"atxmega256c3", "avrxmega6", "avrxmega6"},
+ {"atxmega256d3", "avrxmega6", "avrxmega6"},
+ {"atxmega384c3", "avrxmega6", "avrxmega6"},
+ {"atxmega384d3", "avrxmega6", "avrxmega6"},
+ {"atxmega128a1", "avrxmega7", "avrxmega7"},
+ {"atxmega128a1u", "avrxmega7", "avrxmega7"},
+ {"atxmega128a4u", "avrxmega7", "avrxmega7"},
+};
+
+std::string GetMCUSubPath(StringRef MCUName) {
+ for (const auto &MCU : MCUInfo)
+ if (MCU.Name == MCUName)
+ return std::string(MCU.SubPath);
+ return "";
+}
+
+llvm::Optional<StringRef> GetMCUFamilyName(StringRef MCUName) {
+ for (const auto &MCU : MCUInfo)
+ if (MCU.Name == MCUName)
+ return Optional<StringRef>(MCU.Family);
+ return Optional<StringRef>();
+}
+
+llvm::Optional<unsigned> GetMCUSectionAddressData(StringRef MCU) {
+ return llvm::StringSwitch<llvm::Optional<unsigned>>(MCU)
+ .Case("atmega328", Optional<unsigned>(0x800100))
+ .Case("atmega328p", Optional<unsigned>(0x800100))
+ .Default(Optional<unsigned>());
}
const StringRef PossibleAVRLibcLocations[] = {
@@ -59,7 +319,7 @@ AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
// We cannot link any standard libraries without an MCU specified.
D.Diag(diag::warn_drv_avr_mcu_not_specified);
} else {
- Optional<StringRef> FamilyName = GetMcuFamilyName(CPU);
+ Optional<StringRef> FamilyName = GetMCUFamilyName(CPU);
Optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
if (!FamilyName.hasValue()) {
@@ -76,11 +336,10 @@ AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
} else { // We have enough information to link stdlibs
std::string GCCRoot = std::string(GCCInstallation.getInstallPath());
std::string LibcRoot = AVRLibcRoot.getValue();
+ std::string SubPath = GetMCUSubPath(CPU);
- getFilePaths().push_back(LibcRoot + std::string("/lib/") +
- std::string(*FamilyName));
- getFilePaths().push_back(GCCRoot + std::string("/") +
- std::string(*FamilyName));
+ getFilePaths().push_back(LibcRoot + std::string("/lib/") + SubPath);
+ getFilePaths().push_back(GCCRoot + std::string("/") + SubPath);
LinkStdlib = true;
}
@@ -97,12 +356,12 @@ Tool *AVRToolChain::buildLinker() const {
void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
+ const InputInfoList &Inputs, const ArgList &Args,
const char *LinkingOutput) const {
// Compute information about the target AVR.
std::string CPU = getCPUName(Args, getToolChain().getTriple());
- llvm::Optional<StringRef> FamilyName = GetMcuFamilyName(CPU);
+ llvm::Optional<StringRef> FamilyName = GetMCUFamilyName(CPU);
+ llvm::Optional<unsigned> SectionAddressData = GetMCUSectionAddressData(CPU);
std::string Linker = getToolChain().GetProgramPath(getShortName());
ArgStringList CmdArgs;
@@ -118,6 +377,17 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
getToolChain().AddFilePathLibArgs(Args, CmdArgs);
+ if (SectionAddressData.hasValue()) {
+ std::string DataSectionArg = std::string("-Tdata=0x") +
+ llvm::utohexstr(SectionAddressData.getValue());
+ CmdArgs.push_back(Args.MakeArgString(DataSectionArg));
+ } else {
+ // We do not have an entry for this CPU in the address mapping table yet.
+ getToolChain().getDriver().Diag(
+ diag::warn_drv_avr_linker_section_addresses_not_implemented)
+ << CPU;
+ }
+
// If the family name is known, we can link with the device-specific libgcc.
// Without it, libgcc will simply not be linked. This matches avr-gcc
// behavior.
@@ -142,9 +412,9 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
}
- C.addCommand(
- std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
- Args.MakeArgString(Linker), CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker),
+ CmdArgs, Inputs, Output));
}
llvm::Optional<std::string> AVRToolChain::findAVRLibcInstallation() const {
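The new MCUInfo table above drives two separate decisions: the library sub-path appended to the avr-libc and GCC roots, and (for the two MCUs covered so far) the data-section start address passed to the linker. A small standalone sketch of how the two lookups combine, with a one-entry copy of the table and a hypothetical install root standing in for the detected avr-libc path:

#include <cstdio>
#include <string>

struct MCUEntry { const char *Name; const char *SubPath; unsigned DataAddr; };
// One row copied from the table above; 0x800100 matches the atmega328p case.
static const MCUEntry Table[] = {{"atmega328p", "avr5", 0x800100}};

int main() {
  std::string CPU = "atmega328p";
  for (const MCUEntry &E : Table)
    if (CPU == E.Name) {
      // Mirrors getFilePaths().push_back(LibcRoot + "/lib/" + SubPath).
      std::printf("library dir: %s/lib/%s\n", "/usr/avr", E.SubPath);
      // Mirrors the "-Tdata=0x..." argument built with llvm::utohexstr.
      std::printf("-Tdata=0x%X\n", E.DataAddr);
    }
  return 0;
}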
diff --git a/clang/lib/Driver/ToolChains/Ananas.cpp b/clang/lib/Driver/ToolChains/Ananas.cpp
index a4141a57accc..e5e33fe24874 100644
--- a/clang/lib/Driver/ToolChains/Ananas.cpp
+++ b/clang/lib/Driver/ToolChains/Ananas.cpp
@@ -39,8 +39,9 @@ void ananas::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -124,8 +125,9 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
// Ananas - Ananas tool chain which can call as(1) and ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 487c50dfc466..a5e632fd8cdb 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -40,9 +40,20 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
// Handle the case where the CPU name is 'native'.
if (CPU == "native")
return std::string(llvm::sys::getHostCPUName());
- else if (CPU.size())
+
+ // arm64e requires v8.3a and only runs on apple-a12 and later CPUs.
+ if (Triple.isArm64e())
+ return "apple-a12";
+
+ if (CPU.size())
return CPU;
+ if (Triple.isTargetMachineMac() &&
+ Triple.getArch() == llvm::Triple::aarch64) {
+ // Apple Silicon macs default to A12 CPUs.
+ return "apple-a12";
+ }
+
// Make sure we pick the appropriate Apple CPU if -arch is used or when
// targeting a Darwin OS.
if (Args.getLastArg(options::OPT_arch) || Triple.isOSDarwin())
@@ -68,9 +79,10 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
else
return false;
- // +sve implies +f32mm if the base architecture is v8.6A
+ // +sve implies +f32mm if the base architecture is v8.6A or v8.7A
// it isn't the case in general that sve implies both f64mm and f32mm
- if ((ArchKind == llvm::AArch64::ArchKind::ARMV8_6A) && Feature == "sve")
+ if ((ArchKind == llvm::AArch64::ArchKind::ARMV8_6A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV8_7A) && Feature == "sve")
Features.push_back("+f32mm");
}
return true;
@@ -94,7 +106,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
if (!llvm::AArch64::getArchFeatures(ArchKind, Features))
return false;
- unsigned Extension = llvm::AArch64::getDefaultExtensions(CPU, ArchKind);
+ uint64_t Extension = llvm::AArch64::getDefaultExtensions(CPU, ArchKind);
if (!llvm::AArch64::getExtensionFeatures(Extension, Features))
return false;
}
@@ -370,9 +382,11 @@ fp16_fml_fallthrough:
V8_6Pos = Features.insert(std::next(V8_6Pos), {"+i8mm", "+bf16"});
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access))
+ options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_mno_unaligned_access))
Features.push_back("+strict-align");
+ } else if (Triple.isOSOpenBSD())
+ Features.push_back("+strict-align");
if (Args.hasArg(options::OPT_ffixed_x1))
Features.push_back("+reserve-x1");
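The getAArch64TargetCPU changes establish a fixed precedence for choosing the CPU, which is easy to lose in the diff context. A compact sketch of that ordering, with booleans standing in for the Triple queries and a placeholder for llvm::sys::getHostCPUName():

#include <string>

static std::string pickAArch64CPU(const std::string &MCpu, bool IsArm64e,
                                  bool IsAppleSiliconMac) {
  if (MCpu == "native")
    return "host-cpu";   // placeholder for llvm::sys::getHostCPUName()
  if (IsArm64e)
    return "apple-a12";  // arm64e requires v8.3a; apple-a12 and later only
  if (!MCpu.empty())
    return MCpu;         // an explicit -mcpu value wins next
  if (IsAppleSiliconMac)
    return "apple-a12";  // Apple Silicon Macs default to A12
  return "generic";      // Darwin/-arch handling elided from this sketch
}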
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index afe896b4a65b..ef590db1eecd 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -32,6 +32,12 @@ bool arm::isARMMProfile(const llvm::Triple &Triple) {
return llvm::ARM::parseArchProfile(Arch) == llvm::ARM::ProfileKind::M;
}
+// True if the triple's architecture is A-profile.
+bool arm::isARMAProfile(const llvm::Triple &Triple) {
+ llvm::StringRef Arch = Triple.getArchName();
+ return llvm::ARM::parseArchProfile(Arch) == llvm::ARM::ProfileKind::A;
+}
+
// Get Arch/CPU from args.
void arm::getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
llvm::StringRef &CPU, bool FromAs) {
@@ -73,14 +79,15 @@ static unsigned getARMFPUFeatures(const Driver &D, const Arg *A,
}
// Decode ARM features from string like +[no]featureA+[no]featureB+...
-static bool DecodeARMFeatures(const Driver &D, StringRef text,
- StringRef CPU, llvm::ARM::ArchKind ArchKind,
- std::vector<StringRef> &Features) {
+static bool DecodeARMFeatures(const Driver &D, StringRef text, StringRef CPU,
+ llvm::ARM::ArchKind ArchKind,
+ std::vector<StringRef> &Features,
+ unsigned &ArgFPUID) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
- if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features))
+ if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features, ArgFPUID))
return false;
}
return true;
@@ -102,14 +109,14 @@ static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef ArchName, llvm::StringRef CPUName,
std::vector<StringRef> &Features,
- const llvm::Triple &Triple) {
+ const llvm::Triple &Triple, unsigned &ArgFPUID) {
std::pair<StringRef, StringRef> Split = ArchName.split("+");
std::string MArch = arm::getARMArch(ArchName, Triple);
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(MArch);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
- (Split.second.size() && !DecodeARMFeatures(
- D, Split.second, CPUName, ArchKind, Features)))
+ (Split.second.size() && !DecodeARMFeatures(D, Split.second, CPUName,
+ ArchKind, Features, ArgFPUID)))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
@@ -117,15 +124,15 @@ static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef CPUName, llvm::StringRef ArchName,
std::vector<StringRef> &Features,
- const llvm::Triple &Triple) {
+ const llvm::Triple &Triple, unsigned &ArgFPUID) {
std::pair<StringRef, StringRef> Split = CPUName.split("+");
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
llvm::ARM::ArchKind ArchKind =
arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
- (Split.second.size() && !DecodeARMFeatures(
- D, Split.second, CPU, ArchKind, Features)))
+ (Split.second.size() &&
+ !DecodeARMFeatures(D, Split.second, CPU, ArchKind, Features, ArgFPUID)))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
@@ -133,6 +140,7 @@ bool arm::useAAPCSForMachO(const llvm::Triple &T) {
// The backend is hardwired to assume AAPCS for M-class processors, ensure
// the frontend matches that.
return T.getEnvironment() == llvm::Triple::EABI ||
+ T.getEnvironment() == llvm::Triple::EABIHF ||
T.getOS() == llvm::Triple::UnknownOS || isARMMProfile(T);
}
@@ -159,11 +167,73 @@ arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
return arm::getARMFloatABI(TC.getDriver(), TC.getEffectiveTriple(), Args);
}
+arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
+ auto SubArch = getARMSubArchVersionNumber(Triple);
+ switch (Triple.getOS()) {
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ // Darwin defaults to "softfp" for v6 and v7.
+ if (Triple.isWatchABI())
+ return FloatABI::Hard;
+ else
+ return (SubArch == 6 || SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
+
+ case llvm::Triple::WatchOS:
+ return FloatABI::Hard;
+
+ // FIXME: this is invalid for WindowsCE
+ case llvm::Triple::Win32:
+ return FloatABI::Hard;
+
+ case llvm::Triple::NetBSD:
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ return FloatABI::Hard;
+ default:
+ return FloatABI::Soft;
+ }
+ break;
+
+ case llvm::Triple::FreeBSD:
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ return FloatABI::Hard;
+ default:
+ // FreeBSD defaults to soft float
+ return FloatABI::Soft;
+ }
+ break;
+
+ case llvm::Triple::OpenBSD:
+ return FloatABI::SoftFP;
+
+ default:
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::MuslEABIHF:
+ case llvm::Triple::EABIHF:
+ return FloatABI::Hard;
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::MuslEABI:
+ case llvm::Triple::EABI:
+ // EABI is always AAPCS, and if it was not marked 'hard', it's softfp
+ return FloatABI::SoftFP;
+ case llvm::Triple::Android:
+ return (SubArch >= 7) ? FloatABI::SoftFP : FloatABI::Soft;
+ default:
+ return FloatABI::Invalid;
+ }
+ }
+ return FloatABI::Invalid;
+}
+
// Select the float ABI as determined by -msoft-float, -mhard-float, and
// -mfloat-abi=.
arm::FloatABI arm::getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args) {
- auto SubArch = getARMSubArchVersionNumber(Triple);
arm::FloatABI ABI = FloatABI::Invalid;
if (Arg *A =
Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
@@ -183,95 +253,23 @@ arm::FloatABI arm::getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
ABI = FloatABI::Soft;
}
}
-
- // It is incorrect to select hard float ABI on MachO platforms if the ABI is
- // "apcs-gnu".
- if (Triple.isOSBinFormatMachO() && !useAAPCSForMachO(Triple) &&
- ABI == FloatABI::Hard) {
- D.Diag(diag::err_drv_unsupported_opt_for_target) << A->getAsString(Args)
- << Triple.getArchName();
- }
}
// If unspecified, choose the default based on the platform.
- if (ABI == FloatABI::Invalid) {
- switch (Triple.getOS()) {
- case llvm::Triple::Darwin:
- case llvm::Triple::MacOSX:
- case llvm::Triple::IOS:
- case llvm::Triple::TvOS: {
- // Darwin defaults to "softfp" for v6 and v7.
- ABI = (SubArch == 6 || SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
- ABI = Triple.isWatchABI() ? FloatABI::Hard : ABI;
- break;
- }
- case llvm::Triple::WatchOS:
- ABI = FloatABI::Hard;
- break;
+ if (ABI == FloatABI::Invalid)
+ ABI = arm::getDefaultFloatABI(Triple);
- // FIXME: this is invalid for WindowsCE
- case llvm::Triple::Win32:
+ if (ABI == FloatABI::Invalid) {
+ // Assume "soft", but warn the user we are guessing.
+ if (Triple.isOSBinFormatMachO() &&
+ Triple.getSubArch() == llvm::Triple::ARMSubArch_v7em)
ABI = FloatABI::Hard;
- break;
-
- case llvm::Triple::NetBSD:
- switch (Triple.getEnvironment()) {
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABIHF:
- ABI = FloatABI::Hard;
- break;
- default:
- ABI = FloatABI::Soft;
- break;
- }
- break;
-
- case llvm::Triple::FreeBSD:
- switch (Triple.getEnvironment()) {
- case llvm::Triple::GNUEABIHF:
- ABI = FloatABI::Hard;
- break;
- default:
- // FreeBSD defaults to soft float
- ABI = FloatABI::Soft;
- break;
- }
- break;
-
- case llvm::Triple::OpenBSD:
- ABI = FloatABI::SoftFP;
- break;
+ else
+ ABI = FloatABI::Soft;
- default:
- switch (Triple.getEnvironment()) {
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::MuslEABIHF:
- case llvm::Triple::EABIHF:
- ABI = FloatABI::Hard;
- break;
- case llvm::Triple::GNUEABI:
- case llvm::Triple::MuslEABI:
- case llvm::Triple::EABI:
- // EABI is always AAPCS, and if it was not marked 'hard', it's softfp
- ABI = FloatABI::SoftFP;
- break;
- case llvm::Triple::Android:
- ABI = (SubArch >= 7) ? FloatABI::SoftFP : FloatABI::Soft;
- break;
- default:
- // Assume "soft", but warn the user we are guessing.
- if (Triple.isOSBinFormatMachO() &&
- Triple.getSubArch() == llvm::Triple::ARMSubArch_v7em)
- ABI = FloatABI::Hard;
- else
- ABI = FloatABI::Soft;
-
- if (Triple.getOS() != llvm::Triple::UnknownOS ||
- !Triple.isOSBinFormatMachO())
- D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
- break;
- }
- }
+ if (Triple.getOS() != llvm::Triple::UnknownOS ||
+ !Triple.isOSBinFormatMachO())
+ D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
}
assert(ABI != FloatABI::Invalid && "must select an ABI");
@@ -347,6 +345,8 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
StringRef ArchName;
StringRef CPUName;
+ unsigned ArchArgFPUID = llvm::ARM::FK_INVALID;
+ unsigned CPUArgFPUID = llvm::ARM::FK_INVALID;
// Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
if (WaCPU) {
@@ -364,14 +364,14 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
D.Diag(clang::diag::warn_drv_unused_argument)
<< ArchArg->getAsString(Args);
ArchName = StringRef(WaArch->getValue()).substr(7);
- checkARMArchName(D, WaArch, Args, ArchName, CPUName,
- ExtensionFeatures, Triple);
+ checkARMArchName(D, WaArch, Args, ArchName, CPUName, ExtensionFeatures,
+ Triple, ArchArgFPUID);
// FIXME: Set Arch.
D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
} else if (ArchArg) {
ArchName = ArchArg->getValue();
- checkARMArchName(D, ArchArg, Args, ArchName, CPUName,
- ExtensionFeatures, Triple);
+ checkARMArchName(D, ArchArg, Args, ArchName, CPUName, ExtensionFeatures,
+ Triple, ArchArgFPUID);
}
// Add CPU features for generic CPUs
@@ -390,8 +390,8 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
}
if (CPUArg)
- checkARMCPUName(D, CPUArg, Args, CPUName, ArchName,
- ExtensionFeatures, Triple);
+ checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, ExtensionFeatures,
+ Triple, CPUArgFPUID);
// Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
unsigned FPUID = llvm::ARM::FK_INVALID;
const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
@@ -455,20 +455,26 @@ fp16_fml_fallthrough:
Features.push_back("+fullfp16");
}
- // Setting -msoft-float/-mfloat-abi=soft effectively disables the FPU (GCC
- // ignores the -mfpu options in this case).
- // Note that the ABI can also be set implicitly by the target selected.
+ // Setting -msoft-float/-mfloat-abi=soft, -mfpu=none, or adding +nofp to
+ // -march/-mcpu effectively disables the FPU (GCC ignores the -mfpu options in
+ // this case). Note that the ABI can also be set implicitly by the target
+ // selected.
if (ABI == arm::FloatABI::Soft) {
llvm::ARM::getFPUFeatures(llvm::ARM::FK_NONE, Features);
// Disable all features relating to hardware FP, not already disabled by the
// above call.
+ Features.insert(Features.end(), {"-dotprod", "-fp16fml", "-bf16", "-mve",
+ "-mve.fp", "-fpregs"});
+ } else if (FPUID == llvm::ARM::FK_NONE ||
+ ArchArgFPUID == llvm::ARM::FK_NONE ||
+ CPUArgFPUID == llvm::ARM::FK_NONE) {
+ // -mfpu=none, -march=armvX+nofp or -mcpu=X+nofp is *very* similar to
+ // -mfloat-abi=soft, except that it must not disable MVE-I. These options
+ // disable the FPU but not the FPU registers, and MVE-I depends only on the
+ // latter, so it remains supported.
Features.insert(Features.end(),
- {"-dotprod", "-fp16fml", "-mve", "-mve.fp", "-fpregs"});
- } else if (FPUID == llvm::ARM::FK_NONE) {
- // -mfpu=none is *very* similar to -mfloat-abi=soft, only that it should not
- // disable MVE-I.
- Features.insert(Features.end(), {"-dotprod", "-fp16fml", "-mve.fp"});
+ {"-dotprod", "-fp16fml", "-bf16", "-mve.fp"});
if (!hasIntegerMVE(Features))
Features.emplace_back("-fpregs");
}
@@ -606,6 +612,45 @@ fp16_fml_fallthrough:
if (Args.hasArg(options::OPT_mno_neg_immediates))
Features.push_back("+no-neg-immediates");
+
+ // Enable/disable straight line speculation hardening.
+ if (Arg *A = Args.getLastArg(options::OPT_mharden_sls_EQ)) {
+ StringRef Scope = A->getValue();
+ bool EnableRetBr = false;
+ bool EnableBlr = false;
+ if (Scope != "none" && Scope != "all") {
+ SmallVector<StringRef, 4> Opts;
+ Scope.split(Opts, ",");
+ for (auto Opt : Opts) {
+ Opt = Opt.trim();
+ if (Opt == "retbr") {
+ EnableRetBr = true;
+ continue;
+ }
+ if (Opt == "blr") {
+ EnableBlr = true;
+ continue;
+ }
+ D.Diag(diag::err_invalid_sls_hardening)
+ << Scope << A->getAsString(Args);
+ break;
+ }
+ } else if (Scope == "all") {
+ EnableRetBr = true;
+ EnableBlr = true;
+ }
+
+ if (EnableRetBr || EnableBlr)
+ if (!(isARMAProfile(Triple) && getARMSubArchVersionNumber(Triple) >= 7))
+ D.Diag(diag::err_sls_hardening_arm_not_supported)
+ << Scope << A->getAsString(Args);
+
+ if (EnableRetBr)
+ Features.push_back("+harden-sls-retbr");
+ if (EnableBlr)
+ Features.push_back("+harden-sls-blr");
+ }
+
}
const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
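The -mharden-sls= handling added above accepts "none", "all", or a comma-separated subset of {retbr, blr} and diagnoses anything else. A standalone restatement of that parse using std::getline (the driver additionally trims whitespace around each option, which this sketch omits):

#include <sstream>
#include <string>

static bool parseSLSScope(const std::string &Scope, bool &RetBr, bool &Blr) {
  RetBr = Blr = false;
  if (Scope == "none")
    return true;
  if (Scope == "all") {
    RetBr = Blr = true;
    return true;
  }
  std::istringstream SS(Scope);
  std::string Opt;
  while (std::getline(SS, Opt, ',')) {
    if (Opt == "retbr")
      RetBr = true;
    else if (Opt == "blr")
      Blr = true;
    else
      return false; // the driver emits err_invalid_sls_hardening here
  }
  return true;
}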
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.h b/clang/lib/Driver/ToolChains/Arch/ARM.h
index 0ba1a59852aa..02d91cdaee13 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -47,6 +47,7 @@ enum class FloatABI {
Hard,
};
+FloatABI getDefaultFloatABI(const llvm::Triple &Triple);
FloatABI getARMFloatABI(const ToolChain &TC, const llvm::opt::ArgList &Args);
FloatABI getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
@@ -62,6 +63,7 @@ void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
std::vector<llvm::StringRef> &Features, bool ForAS);
int getARMSubArchVersionNumber(const llvm::Triple &Triple);
bool isARMMProfile(const llvm::Triple &Triple);
+bool isARMAProfile(const llvm::Triple &Triple);
} // end namespace arm
} // end namespace tools
diff --git a/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index 7b4dd703c0c7..5a509dbb2bd3 100644
--- a/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -452,8 +452,6 @@ bool mips::isNaN2008(const ArgList &Args, const llvm::Triple &Triple) {
return llvm::StringSwitch<bool>(getCPUName(Args, Triple))
.Cases("mips32r6", "mips64r6", true)
.Default(false);
-
- return false;
}
bool mips::isFP64ADefault(const llvm::Triple &Triple, StringRef CPUName) {
diff --git a/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index 144e276a6bd8..bcaecf4b2d98 100644
--- a/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -57,7 +57,6 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
.Case("970", "970")
.Case("G5", "g5")
.Case("a2", "a2")
- .Case("a2q", "a2q")
.Case("e500", "e500")
.Case("e500mc", "e500mc")
.Case("e5500", "e5500")
diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index 80d12e5aa8da..ffae47e5672e 100644
--- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -58,12 +58,14 @@ static StringRef getExtensionType(StringRef Ext) {
// extension that the compiler currently supports.
static Optional<RISCVExtensionVersion>
isExperimentalExtension(StringRef Ext) {
- if (Ext == "b" || Ext == "zbb" || Ext == "zbc" || Ext == "zbe" ||
- Ext == "zbf" || Ext == "zbm" || Ext == "zbp" || Ext == "zbr" ||
- Ext == "zbs" || Ext == "zbt" || Ext == "zbproposedc")
- return RISCVExtensionVersion{"0", "92"};
- if (Ext == "v")
- return RISCVExtensionVersion{"0", "8"};
+ if (Ext == "b" || Ext == "zba" || Ext == "zbb" || Ext == "zbc" ||
+ Ext == "zbe" || Ext == "zbf" || Ext == "zbm" || Ext == "zbp" ||
+ Ext == "zbr" || Ext == "zbs" || Ext == "zbt" || Ext == "zbproposedc")
+ return RISCVExtensionVersion{"0", "93"};
+ if (Ext == "v" || Ext == "zvamo" || Ext == "zvlsseg")
+ return RISCVExtensionVersion{"1", "0"};
+ if (Ext == "zfh")
+ return RISCVExtensionVersion{"0", "1"};
return None;
}
@@ -256,7 +258,11 @@ static void getExtensionFeatures(const Driver &D,
<< MArch << Error << Ext;
return;
}
- if (isExperimentalExtension(Ext))
+ if (Ext == "zvamo" || Ext == "zvlsseg") {
+ Features.push_back("+experimental-v");
+ Features.push_back("+experimental-zvamo");
+ Features.push_back("+experimental-zvlsseg");
+ } else if (isExperimentalExtension(Ext))
Features.push_back(Args.MakeArgString("+experimental-" + Ext));
else
Features.push_back(Args.MakeArgString("+" + Ext));
@@ -410,9 +416,21 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
break;
case 'b':
Features.push_back("+experimental-b");
+ Features.push_back("+experimental-zba");
+ Features.push_back("+experimental-zbb");
+ Features.push_back("+experimental-zbc");
+ Features.push_back("+experimental-zbe");
+ Features.push_back("+experimental-zbf");
+ Features.push_back("+experimental-zbm");
+ Features.push_back("+experimental-zbp");
+ Features.push_back("+experimental-zbr");
+ Features.push_back("+experimental-zbs");
+ Features.push_back("+experimental-zbt");
break;
case 'v':
Features.push_back("+experimental-v");
+ Features.push_back("+experimental-zvamo");
+ Features.push_back("+experimental-zvlsseg");
break;
}
@@ -446,6 +464,19 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
return true;
}
+// Get features other than the standard extension features.
+static void getRISCFeaturesFromMcpu(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ const llvm::opt::Arg *A, StringRef Mcpu,
+ std::vector<StringRef> &Features) {
+ bool Is64Bit = (Triple.getArch() == llvm::Triple::riscv64);
+ llvm::RISCV::CPUKind CPUKind = llvm::RISCV::parseCPUKind(Mcpu);
+ if (!llvm::RISCV::checkCPUKind(CPUKind, Is64Bit) ||
+ !llvm::RISCV::getCPUFeaturesExceptStdExt(CPUKind, Features)) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ }
+}
+
void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
@@ -454,6 +485,11 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (!getArchFeatures(D, MArch, Features, Args))
return;
+ // If the user gives both -march and -mcpu, take the standard extension
+ // features from MArch and the remaining features (e.g. microarchitecture
+ // features) from -mcpu.
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ getRISCFeaturesFromMcpu(D, Triple, Args, A, A->getValue(), Features);
+
// Handle features corresponding to "-ffixed-X" options
if (Args.hasArg(options::OPT_ffixed_x1))
Features.push_back("+reserve-x1");
@@ -543,11 +579,9 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
// GCC's logic around choosing a default `-mabi=` is complex. If GCC is not
// configured using `--with-abi=`, then the logic for the default choice is
- // defined in config.gcc. This function is based on the logic in GCC 9.2.0. We
- // deviate from GCC's default only on baremetal targets (UnknownOS) where
- // neither `-march` nor `-mabi` is specified.
+ // defined in config.gcc. This function is based on the logic in GCC 9.2.0.
//
- // The logic uses the following, in order:
+ // The logic used in GCC 9.2.0 is the following, in order:
// 1. Explicit choices using `--with-abi=`
// 2. A default based on `--with-arch=`, if provided
// 3. A default based on the target triple's arch
@@ -556,38 +590,40 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
//
// Clang does not have `--with-arch=` or `--with-abi=`, so we use `-march=`
// and `-mabi=` respectively instead.
+ //
+ // To make the choice of defaults clearer, Clang uses the following logic,
+ // in order:
+ // 1. Explicit choices using `-mabi=`
+ // 2. A default based on the architecture as determined by getRISCVArch
+ // 3. Choose a default based on the triple
// 1. If `-mabi=` is specified, use it.
if (const Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
return A->getValue();
- // 2. Choose a default based on `-march=`
+ // 2. Choose a default based on the target architecture.
//
// rv32g | rv32*d -> ilp32d
// rv32e -> ilp32e
// rv32* -> ilp32
// rv64g | rv64*d -> lp64d
// rv64* -> lp64
- if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
- StringRef MArch = A->getValue();
-
- if (MArch.startswith_lower("rv32")) {
- // FIXME: parse `March` to find `D` extension properly
- if (MArch.substr(4).contains_lower("d") ||
- MArch.startswith_lower("rv32g"))
- return "ilp32d";
- else if (MArch.startswith_lower("rv32e"))
- return "ilp32e";
- else
- return "ilp32";
- } else if (MArch.startswith_lower("rv64")) {
- // FIXME: parse `March` to find `D` extension properly
- if (MArch.substr(4).contains_lower("d") ||
- MArch.startswith_lower("rv64g"))
- return "lp64d";
- else
- return "lp64";
- }
+ StringRef MArch = getRISCVArch(Args, Triple);
+
+ if (MArch.startswith_lower("rv32")) {
+ // FIXME: parse `March` to find `D` extension properly
+ if (MArch.substr(4).contains_lower("d") || MArch.startswith_lower("rv32g"))
+ return "ilp32d";
+ else if (MArch.startswith_lower("rv32e"))
+ return "ilp32e";
+ else
+ return "ilp32";
+ } else if (MArch.startswith_lower("rv64")) {
+ // FIXME: parse `March` to find `D` extension properly
+ if (MArch.substr(4).contains_lower("d") || MArch.startswith_lower("rv64g"))
+ return "lp64d";
+ else
+ return "lp64";
}
// 3. Choose a default based on the triple
@@ -617,10 +653,11 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// GCC's logic around choosing a default `-march=` is complex. If GCC is not
// configured using `--with-arch=`, then the logic for the default choice is
// defined in config.gcc. This function is based on the logic in GCC 9.2.0. We
- // deviate from GCC's default only on baremetal targets (UnknownOS) where
- // neither `-march` nor `-mabi` is specified.
+ // deviate from GCC's defaults by additionally honoring the `-mcpu` option
+ // (GCC does not support `-mcpu`) and on baremetal targets (UnknownOS) where
+ // neither `-march` nor `-mabi` is specified.
//
- // The logic uses the following, in order:
+ // The logic used in GCC 9.2.0 is the following, in order:
// 1. Explicit choices using `--with-arch=`
// 2. A default based on `--with-abi=`, if provided
// 3. A default based on the target triple's arch
@@ -630,6 +667,12 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// Clang does not have `--with-arch=` or `--with-abi=`, so we use `-march=`
// and `-mabi=` respectively instead.
//
+ // Clang uses the following logic, in order:
+ // 1. Explicit choices using `-march=`
+ // 2. Based on `-mcpu` if the target CPU has a default ISA string
+ // 3. A default based on `-mabi`, if provided
+ // 4. A default based on the target triple's arch
+ //
// Clang does not yet support MULTILIB_REUSE, so we use `rv{XLEN}imafdc`
// instead of `rv{XLEN}gc` though they are (currently) equivalent.
@@ -637,7 +680,15 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
return A->getValue();
- // 2. Choose a default based on `-mabi=`
+ // 2. Get march (isa string) based on `-mcpu=`
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ StringRef MArch = llvm::RISCV::getMArchFromMcpu(A->getValue());
+ // Fall back to the next rule if the target CPU's default march is empty.
+ if (MArch != "")
+ return MArch;
+ }
+
+ // 3. Choose a default based on `-mabi=`
//
// ilp32e -> rv32e
// ilp32 | ilp32f | ilp32d -> rv32imafdc
@@ -653,7 +704,7 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
return "rv64imafdc";
}
- // 3. Choose a default based on the triple
+ // 4. Choose a default based on the triple
//
// We deviate from GCC's defaults here:
// - On `riscv{XLEN}-unknown-elf` we default to `rv{XLEN}imac`
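The refactored getRISCVABI now funnels everything through getRISCVArch and applies the mapping spelled out in the comment block (rv32*d/rv32g -> ilp32d, rv32e -> ilp32e, rv32* -> ilp32, plus the rv64 analogues). A standalone sketch of just that mapping, using std::string in place of StringRef and deliberately keeping the same crude "contains a 'd' after the prefix" test the FIXMEs call out:

#include <cctype>
#include <string>

static std::string defaultRISCVABI(std::string MArch) {
  for (char &C : MArch)
    C = static_cast<char>(std::tolower(static_cast<unsigned char>(C)));
  bool HasD = MArch.find('d', 4) != std::string::npos;
  if (MArch.rfind("rv32", 0) == 0) {
    if (HasD || MArch.rfind("rv32g", 0) == 0)
      return "ilp32d";
    if (MArch.rfind("rv32e", 0) == 0)
      return "ilp32e";
    return "ilp32";
  }
  if (MArch.rfind("rv64", 0) == 0) {
    if (HasD || MArch.rfind("rv64g", 0) == 0)
      return "lp64d";
    return "lp64";
  }
  return "ilp32"; // the triple-based fallback (step 3) is elided here
}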
diff --git a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
index 043b7f257c01..70ba8eb2a7d0 100644
--- a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
@@ -21,12 +21,19 @@ using namespace llvm::opt;
const char *sparc::getSparcAsmModeForCPU(StringRef Name,
const llvm::Triple &Triple) {
if (Triple.getArch() == llvm::Triple::sparcv9) {
+ const char *DefV9CPU;
+
+ if (Triple.isOSLinux() || Triple.isOSFreeBSD() || Triple.isOSOpenBSD())
+ DefV9CPU = "-Av9a";
+ else
+ DefV9CPU = "-Av9";
+
return llvm::StringSwitch<const char *>(Name)
.Case("niagara", "-Av9b")
.Case("niagara2", "-Av9b")
.Case("niagara3", "-Av9d")
.Case("niagara4", "-Av9d")
- .Default("-Av9");
+ .Default(DefV9CPU);
} else {
return llvm::StringSwitch<const char *>(Name)
.Case("v8", "-Av8")
diff --git a/clang/lib/Driver/ToolChains/Arch/VE.cpp b/clang/lib/Driver/ToolChains/Arch/VE.cpp
index fa10e4810f1c..9dfd37c2106d 100644
--- a/clang/lib/Driver/ToolChains/Arch/VE.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/VE.cpp
@@ -18,9 +18,5 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-const char *ve::getVEAsmModeForCPU(StringRef Name, const llvm::Triple &Triple) {
- return "";
-}
-
void ve::getVETargetFeatures(const Driver &D, const ArgList &Args,
std::vector<StringRef> &Features) {}
diff --git a/clang/lib/Driver/ToolChains/Arch/VE.h b/clang/lib/Driver/ToolChains/Arch/VE.h
index 713e3e7d042f..531433534914 100644
--- a/clang/lib/Driver/ToolChains/Arch/VE.h
+++ b/clang/lib/Driver/ToolChains/Arch/VE.h
@@ -22,8 +22,6 @@ namespace ve {
void getVETargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
-const char *getVEAsmModeForCPU(llvm::StringRef Name,
- const llvm::Triple &Triple);
} // end namespace ve
} // namespace tools
diff --git a/clang/lib/Driver/ToolChains/Arch/X86.cpp b/clang/lib/Driver/ToolChains/Arch/X86.cpp
index 2cc44c09917f..94a53f9d9e46 100644
--- a/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -20,51 +20,52 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-const char *x86::getX86TargetCPU(const ArgList &Args,
+std::string x86::getX86TargetCPU(const ArgList &Args,
const llvm::Triple &Triple) {
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
- if (StringRef(A->getValue()) != "native")
- return A->getValue();
+ StringRef CPU = A->getValue();
+ if (CPU != "native")
+ return std::string(CPU);
// FIXME: Reject attempts to use -march=native unless the target matches
// the host.
//
// FIXME: We should also incorporate the detected target features for use
// with -march=native.
- std::string CPU = std::string(llvm::sys::getHostCPUName());
+ CPU = llvm::sys::getHostCPUName();
if (!CPU.empty() && CPU != "generic")
- return Args.MakeArgString(CPU);
+ return std::string(CPU);
}
if (const Arg *A = Args.getLastArgNoClaim(options::OPT__SLASH_arch)) {
// Mapping built by looking at lib/Basic's X86TargetInfo::initFeatureMap().
StringRef Arch = A->getValue();
- const char *CPU = nullptr;
+ StringRef CPU;
if (Triple.getArch() == llvm::Triple::x86) { // 32-bit-only /arch: flags.
- CPU = llvm::StringSwitch<const char *>(Arch)
+ CPU = llvm::StringSwitch<StringRef>(Arch)
.Case("IA32", "i386")
.Case("SSE", "pentium3")
.Case("SSE2", "pentium4")
- .Default(nullptr);
+ .Default("");
}
- if (CPU == nullptr) { // 32-bit and 64-bit /arch: flags.
- CPU = llvm::StringSwitch<const char *>(Arch)
+ if (CPU.empty()) { // 32-bit and 64-bit /arch: flags.
+ CPU = llvm::StringSwitch<StringRef>(Arch)
.Case("AVX", "sandybridge")
.Case("AVX2", "haswell")
.Case("AVX512F", "knl")
.Case("AVX512", "skylake-avx512")
- .Default(nullptr);
+ .Default("");
}
- if (CPU) {
+ if (!CPU.empty()) {
A->claim();
- return CPU;
+ return std::string(CPU);
}
}
// Select the default CPU if none was given (or detection failed).
if (!Triple.isX86())
- return nullptr; // This routine is only handling x86 targets.
+ return ""; // This routine is only handling x86 targets.
bool Is64Bit = Triple.getArch() == llvm::Triple::x86_64;
@@ -93,13 +94,13 @@ const char *x86::getX86TargetCPU(const ArgList &Args,
return "x86-64";
switch (Triple.getOS()) {
- case llvm::Triple::FreeBSD:
- return "i686";
case llvm::Triple::NetBSD:
- case llvm::Triple::OpenBSD:
return "i486";
case llvm::Triple::Haiku:
+ case llvm::Triple::OpenBSD:
return "i586";
+ case llvm::Triple::FreeBSD:
+ return "i686";
default:
// Fallback to p4.
return "pentium4";
diff --git a/clang/lib/Driver/ToolChains/Arch/X86.h b/clang/lib/Driver/ToolChains/Arch/X86.h
index 9f9c2b8c4b49..14f0a26c8be4 100644
--- a/clang/lib/Driver/ToolChains/Arch/X86.h
+++ b/clang/lib/Driver/ToolChains/Arch/X86.h
@@ -21,7 +21,7 @@ namespace driver {
namespace tools {
namespace x86 {
-const char *getX86TargetCPU(const llvm::opt::ArgList &Args,
+std::string getX86TargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp
index 97cfa7d0e156..7619dd30da5a 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -12,6 +12,7 @@
#include "InputInfo.h"
#include "Gnu.h"
+#include "Arch/RISCV.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -27,15 +28,83 @@ using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
+static Multilib makeMultilib(StringRef commonSuffix) {
+ return Multilib(commonSuffix, commonSuffix, commonSuffix);
+}
+
+static bool findRISCVMultilibs(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ const ArgList &Args, DetectedMultilibs &Result) {
+ Multilib::flags_list Flags;
+ StringRef Arch = riscv::getRISCVArch(Args, TargetTriple);
+ StringRef Abi = tools::riscv::getRISCVABI(Args, TargetTriple);
+
+ if (TargetTriple.getArch() == llvm::Triple::riscv64) {
+ Multilib Imac = makeMultilib("").flag("+march=rv64imac").flag("+mabi=lp64");
+ Multilib Imafdc = makeMultilib("/rv64imafdc/lp64d")
+ .flag("+march=rv64imafdc")
+ .flag("+mabi=lp64d");
+
+ // Multilib reuse
+ bool UseImafdc =
+ (Arch == "rv64imafdc") || (Arch == "rv64gc"); // gc => imafdc
+
+ addMultilibFlag((Arch == "rv64imac"), "march=rv64imac", Flags);
+ addMultilibFlag(UseImafdc, "march=rv64imafdc", Flags);
+ addMultilibFlag(Abi == "lp64", "mabi=lp64", Flags);
+ addMultilibFlag(Abi == "lp64d", "mabi=lp64d", Flags);
+
+ Result.Multilibs = MultilibSet().Either(Imac, Imafdc);
+ return Result.Multilibs.select(Flags, Result.SelectedMultilib);
+ }
+ if (TargetTriple.getArch() == llvm::Triple::riscv32) {
+ Multilib Imac =
+ makeMultilib("").flag("+march=rv32imac").flag("+mabi=ilp32");
+ Multilib I =
+ makeMultilib("/rv32i/ilp32").flag("+march=rv32i").flag("+mabi=ilp32");
+ Multilib Im =
+ makeMultilib("/rv32im/ilp32").flag("+march=rv32im").flag("+mabi=ilp32");
+ Multilib Iac = makeMultilib("/rv32iac/ilp32")
+ .flag("+march=rv32iac")
+ .flag("+mabi=ilp32");
+ Multilib Imafc = makeMultilib("/rv32imafc/ilp32f")
+ .flag("+march=rv32imafc")
+ .flag("+mabi=ilp32f");
+
+ // Multilib reuse
+ bool UseI = (Arch == "rv32i") || (Arch == "rv32ic"); // ic => i
+ bool UseIm = (Arch == "rv32im") || (Arch == "rv32imc"); // imc => im
+ bool UseImafc = (Arch == "rv32imafc") || (Arch == "rv32imafdc") ||
+ (Arch == "rv32gc"); // imafdc,gc => imafc
+
+ addMultilibFlag(UseI, "march=rv32i", Flags);
+ addMultilibFlag(UseIm, "march=rv32im", Flags);
+ addMultilibFlag((Arch == "rv32iac"), "march=rv32iac", Flags);
+ addMultilibFlag((Arch == "rv32imac"), "march=rv32imac", Flags);
+ addMultilibFlag(UseImafc, "march=rv32imafc", Flags);
+ addMultilibFlag(Abi == "ilp32", "mabi=ilp32", Flags);
+ addMultilibFlag(Abi == "ilp32f", "mabi=ilp32f", Flags);
+
+ Result.Multilibs = MultilibSet().Either(I, Im, Iac, Imac, Imafc);
+ return Result.Multilibs.select(Flags, Result.SelectedMultilib);
+ }
+ return false;
+}
+
BareMetal::BareMetal(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
-}
-BareMetal::~BareMetal() {}
+ findMultilibs(D, Triple, Args);
+ SmallString<128> SysRoot(computeSysRoot());
+ if (!SysRoot.empty()) {
+ llvm::sys::path::append(SysRoot, "lib");
+ getFilePaths().push_back(std::string(SysRoot));
+ }
+}
/// Is the triple {arm,thumb}-none-none-{eabi,eabihf}?
static bool isARMBareMetal(const llvm::Triple &Triple) {
@@ -56,20 +125,65 @@ static bool isARMBareMetal(const llvm::Triple &Triple) {
return true;
}
+static bool isRISCVBareMetal(const llvm::Triple &Triple) {
+ if (Triple.getArch() != llvm::Triple::riscv32 &&
+ Triple.getArch() != llvm::Triple::riscv64)
+ return false;
+
+ if (Triple.getVendor() != llvm::Triple::UnknownVendor)
+ return false;
+
+ if (Triple.getOS() != llvm::Triple::UnknownOS)
+ return false;
+
+ return Triple.getEnvironmentName() == "elf";
+}
+
+void BareMetal::findMultilibs(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args) {
+ DetectedMultilibs Result;
+ if (isRISCVBareMetal(Triple)) {
+ if (findRISCVMultilibs(D, Triple, Args, Result)) {
+ SelectedMultilib = Result.SelectedMultilib;
+ Multilibs = Result.Multilibs;
+ }
+ }
+}
+
bool BareMetal::handlesTarget(const llvm::Triple &Triple) {
- return isARMBareMetal(Triple);
+ return isARMBareMetal(Triple) || isRISCVBareMetal(Triple);
}
Tool *BareMetal::buildLinker() const {
return new tools::baremetal::Linker(*this);
}
+std::string BareMetal::getCompilerRTPath() const { return getRuntimesDir(); }
+
+std::string BareMetal::getCompilerRTBasename(const llvm::opt::ArgList &,
+ StringRef, FileType, bool) const {
+ return ("libclang_rt.builtins-" + getTriple().getArchName() + ".a").str();
+}
+
std::string BareMetal::getRuntimesDir() const {
SmallString<128> Dir(getDriver().ResourceDir);
llvm::sys::path::append(Dir, "lib", "baremetal");
+ Dir += SelectedMultilib.gccSuffix();
return std::string(Dir.str());
}
+std::string BareMetal::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot + SelectedMultilib.osSuffix();
+
+ SmallString<128> SysRootDir;
+ llvm::sys::path::append(SysRootDir, getDriver().Dir, "../lib/clang-runtimes",
+ getDriver().getTargetTriple());
+
+ SysRootDir += SelectedMultilib.osSuffix();
+ return std::string(SysRootDir);
+}
+
void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc))
@@ -82,9 +196,11 @@ void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
}
if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
- SmallString<128> Dir(getDriver().SysRoot);
- llvm::sys::path::append(Dir, "include");
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ SmallString<128> Dir(computeSysRoot());
+ if (!Dir.empty()) {
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
}
}
@@ -101,7 +217,7 @@ void BareMetal::AddClangCXXStdlibIncludeArgs(
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
- StringRef SysRoot = getDriver().SysRoot;
+ std::string SysRoot(computeSysRoot());
if (SysRoot.empty())
return;
@@ -156,8 +272,17 @@ void BareMetal::AddCXXStdlibLibArgs(const ArgList &Args,
void BareMetal::AddLinkRuntimeLib(const ArgList &Args,
ArgStringList &CmdArgs) const {
- CmdArgs.push_back(Args.MakeArgString("-lclang_rt.builtins-" +
- getTriple().getArchName()));
+ ToolChain::RuntimeLibType RLT = GetRuntimeLibType(Args);
+ switch (RLT) {
+ case ToolChain::RLT_CompilerRT:
+ CmdArgs.push_back(
+ Args.MakeArgString("-lclang_rt.builtins-" + getTriple().getArchName()));
+ return;
+ case ToolChain::RLT_Libgcc:
+ CmdArgs.push_back("-lgcc");
+ return;
+ }
+ llvm_unreachable("Unhandled RuntimeLibType.");
}
void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -175,6 +300,7 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString("-L" + TC.getRuntimesDir()));
+ TC.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
options::OPT_e, options::OPT_s, options::OPT_t,
options::OPT_Z_Flag, options::OPT_r});
@@ -193,5 +319,5 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Args.MakeArgString(TC.GetLinkerPath()),
- CmdArgs, Inputs));
+ CmdArgs, Inputs, Output));
}
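BareMetal::computeSysRoot above prefers an explicit --sysroot (suffixed with the selected multilib's OS suffix) and otherwise synthesizes a path under the driver's install tree. A path-string sketch of that fallback, ignoring the normalization llvm::sys::path::append performs:

#include <string>

static std::string computeSysRoot(const std::string &DriverSysRoot,
                                  const std::string &DriverDir,
                                  const std::string &TargetTriple,
                                  const std::string &OSSuffix) {
  if (!DriverSysRoot.empty())
    return DriverSysRoot + OSSuffix;
  // e.g. <clang-dir>/../lib/clang-runtimes/riscv64-unknown-elf/rv64imafdc/lp64d
  return DriverDir + "/../lib/clang-runtimes/" + TargetTriple + OSSuffix;
}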
diff --git a/clang/lib/Driver/ToolChains/BareMetal.h b/clang/lib/Driver/ToolChains/BareMetal.h
index 4c0c739307b1..a6d4922a380f 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.h
+++ b/clang/lib/Driver/ToolChains/BareMetal.h
@@ -23,9 +23,13 @@ class LLVM_LIBRARY_VISIBILITY BareMetal : public ToolChain {
public:
BareMetal(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
- ~BareMetal() override;
+ ~BareMetal() override = default;
static bool handlesTarget(const llvm::Triple &Triple);
+
+ void findMultilibs(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
protected:
Tool *buildLinker() const override;
@@ -37,6 +41,14 @@ public:
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
+ StringRef getOSLibName() const override { return "baremetal"; }
+
+ std::string getCompilerRTPath() const override;
+ std::string getCompilerRTBasename(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ FileType Type = ToolChain::FT_Static,
+ bool AddArch = true) const override;
+
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
}
@@ -59,6 +71,7 @@ public:
llvm::opt::ArgStringList &CmdArgs) const override;
void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ std::string computeSysRoot() const override;
};
} // namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 9d6333bb5f1d..fdb8a58cd1b3 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -39,15 +39,12 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/YAMLParser.h"
-#ifdef LLVM_ON_UNIX
-#include <unistd.h> // For getuid().
-#endif
-
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang;
@@ -331,6 +328,7 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
break;
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
ppc::getPPCTargetFeatures(D, Triple, Args, Features);
@@ -365,7 +363,7 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
break;
case llvm::Triple::r600:
case llvm::Triple::amdgcn:
- amdgpu::getAMDGPUTargetFeatures(D, Args, Features);
+ amdgpu::getAMDGPUTargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::msp430:
msp430::getMSP430TargetFeatures(D, Args, Features);
@@ -498,7 +496,7 @@ static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
return codegenoptions::DebugLineTablesOnly;
if (A.getOption().matches(options::OPT_gline_directives_only))
return codegenoptions::DebugDirectivesOnly;
- return codegenoptions::DebugInfoConstructor;
+ return codegenoptions::LimitedDebugInfo;
}
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
@@ -527,6 +525,7 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
// WebAssembly never wants frame pointers.
return false;
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
case llvm::Triple::riscv32:
@@ -603,12 +602,12 @@ getFramePointerKind(const ArgList &Args, const llvm::Triple &Triple) {
bool OmitFP = A && A->getOption().matches(options::OPT_fomit_frame_pointer);
bool NoOmitFP =
A && A->getOption().matches(options::OPT_fno_omit_frame_pointer);
- bool KeepLeaf = Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
- options::OPT_mno_omit_leaf_frame_pointer,
- Triple.isAArch64() || Triple.isPS4CPU());
+ bool OmitLeafFP = Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
+ options::OPT_mno_omit_leaf_frame_pointer,
+ Triple.isAArch64() || Triple.isPS4CPU());
if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
(!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
- if (KeepLeaf)
+ if (OmitLeafFP)
return CodeGenOptions::FramePointerKind::NonLeaf;
return CodeGenOptions::FramePointerKind::All;
}
@@ -657,6 +656,21 @@ static void addMacroPrefixMapArg(const Driver &D, const ArgList &Args,
}
}
+/// Add a CC1 and CC1AS option to specify the coverage file path prefix map.
+static void addProfilePrefixMapArg(const Driver &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
+ options::OPT_fprofile_prefix_map_EQ)) {
+ StringRef Map = A->getValue();
+ if (Map.find('=') == StringRef::npos)
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << Map << A->getOption().getName();
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fprofile-prefix-map=" + Map));
+ A->claim();
+ }
+}
+
/// Vectorize at all optimization levels greater than 1 except for -Oz.
/// For -Oz the loop vectorizer is disabled, while the slp vectorizer is
/// enabled.
@@ -763,9 +777,11 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
D.Diag(diag::err_drv_argument_not_allowed_with)
<< ProfileGenerateArg->getSpelling() << ProfileUseArg->getSpelling();
- if (CSPGOGenerateArg && PGOGenerateArg)
+ if (CSPGOGenerateArg && PGOGenerateArg) {
D.Diag(diag::err_drv_argument_not_allowed_with)
<< CSPGOGenerateArg->getSpelling() << PGOGenerateArg->getSpelling();
+ PGOGenerateArg = nullptr;
+ }
if (ProfileGenerateArg) {
if (ProfileGenerateArg->getOption().matches(
@@ -830,9 +846,9 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
Args.hasArg(options::OPT_coverage);
bool EmitCovData = TC.needsGCovInstrumentation(Args);
if (EmitCovNotes)
- CmdArgs.push_back("-femit-coverage-notes");
+ CmdArgs.push_back("-ftest-coverage");
if (EmitCovData)
- CmdArgs.push_back("-femit-coverage-data");
+ CmdArgs.push_back("-fprofile-arcs");
if (Args.hasFlag(options::OPT_fcoverage_mapping,
options::OPT_fno_coverage_mapping, false)) {
@@ -867,6 +883,17 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back(Args.MakeArgString(Twine("-fprofile-filter-files=" + v)));
}
+ if (const auto *A = Args.getLastArg(options::OPT_fprofile_update_EQ)) {
+ StringRef Val = A->getValue();
+ if (Val == "atomic" || Val == "prefer-atomic")
+ CmdArgs.push_back("-fprofile-update=atomic");
+ else if (Val != "single")
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Val;
+ } else if (TC.getSanitizerArgs().needsTsanRt()) {
+ CmdArgs.push_back("-fprofile-update=atomic");
+ }
+
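Editor's note: the -fprofile-update handling above folds "prefer-atomic" into "atomic", treats "single" as the default (nothing forwarded), and diagnoses anything else; the TSan fallback branch is omitted here. A sketch under those assumptions (canonicalizeProfileUpdate is a hypothetical name):

  #include <optional>
  #include <string>

  // Canonicalize an -fprofile-update= value as in the hunk above. Returns
  // the CC1 flag to emit, an empty string for "single" (the default), or
  // std::nullopt for values the driver should diagnose.
  static std::optional<std::string>
  canonicalizeProfileUpdate(const std::string &Val) {
    if (Val == "atomic" || Val == "prefer-atomic")
      return std::string("-fprofile-update=atomic");
    if (Val == "single")
      return std::string(); // nothing to forward; single is the default
    return std::nullopt;    // unsupported value
  }

  int main() {
    return canonicalizeProfileUpdate("prefer-atomic").has_value() ? 0 : 1;
  }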
// Leave -fprofile-dir= an unused argument unless .gcda emission is
// enabled. To be polite, with '-fprofile-arcs -fno-profile-arcs' consider
// the flag used. There is no -fno-profile-dir, so the user has no
@@ -976,6 +1003,9 @@ static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
case codegenoptions::FullDebugInfo:
CmdArgs.push_back("-debug-info-kind=standalone");
break;
+ case codegenoptions::UnusedTypeInfo:
+ CmdArgs.push_back("-debug-info-kind=unused-types");
+ break;
default:
break;
}
@@ -1011,18 +1041,10 @@ static void RenderDebugInfoCompressionArgs(const ArgList &Args,
ArgStringList &CmdArgs,
const Driver &D,
const ToolChain &TC) {
- const Arg *A = Args.getLastArg(options::OPT_gz, options::OPT_gz_EQ);
+ const Arg *A = Args.getLastArg(options::OPT_gz_EQ);
if (!A)
return;
if (checkDebugInfoOption(A, Args, D, TC)) {
- if (A->getOption().getID() == options::OPT_gz) {
- if (llvm::zlib::isAvailable())
- CmdArgs.push_back("--compress-debug-sections");
- else
- D.Diag(diag::warn_debug_compression_unavailable);
- return;
- }
-
StringRef Value = A->getValue();
if (Value == "none") {
CmdArgs.push_back("--compress-debug-sections=none");
@@ -1057,6 +1079,15 @@ static const char *RelocationModelName(llvm::Reloc::Model Model) {
}
llvm_unreachable("Unknown Reloc::Model kind");
}
+static void handleAMDGPUCodeObjectVersionOptions(const Driver &D,
+ const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ unsigned CodeObjVer = getOrCheckAMDGPUCodeObjectVersion(D, Args);
+ CmdArgs.insert(CmdArgs.begin() + 1,
+ Args.MakeArgString(Twine("--amdhsa-code-object-version=") +
+ Twine(CodeObjVer)));
+ CmdArgs.insert(CmdArgs.begin() + 1, "-mllvm");
+}
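Editor's note: the order of the two insert() calls above matters. Both insert at index 1, so the later "-mllvm" lands before the version flag, immediately after the first argument. A standalone sketch with std::vector (CodeObjVer = 4 is an assumed example value):

  #include <iostream>
  #include <string>
  #include <vector>

  // Both insert() calls use index 1, so the later "-mllvm" ends up before
  // the version flag.
  int main() {
    std::vector<std::string> CmdArgs{"-cc1", "-foo"};
    unsigned CodeObjVer = 4;
    CmdArgs.insert(CmdArgs.begin() + 1,
                   "--amdhsa-code-object-version=" +
                       std::to_string(CodeObjVer));
    CmdArgs.insert(CmdArgs.begin() + 1, "-mllvm");
    for (const auto &A : CmdArgs)
      std::cout << A << ' '; // -cc1 -mllvm --amdhsa-code-object-version=4 -foo
    std::cout << '\n';
  }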
void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
const Driver &D, const ArgList &Args,
@@ -1197,7 +1228,11 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (YcArg && JA.getKind() >= Action::PrecompileJobClass &&
JA.getKind() <= Action::AssembleJobClass) {
CmdArgs.push_back(Args.MakeArgString("-building-pch-with-obj"));
- CmdArgs.push_back(Args.MakeArgString("-fpch-instantiate-templates"));
+ // -fpch-instantiate-templates is the default when creating a precompiled
+ // header with /Yc.
+ if (Args.hasFlag(options::OPT_fpch_instantiate_templates,
+ options::OPT_fno_pch_instantiate_templates, true))
+ CmdArgs.push_back(Args.MakeArgString("-fpch-instantiate-templates"));
}
if (YcArg || YuArg) {
StringRef ThroughHeader = YcArg ? YcArg->getValue() : YuArg->getValue();
@@ -1340,6 +1375,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
}
addMacroPrefixMapArg(D, Args, CmdArgs);
+ addProfilePrefixMapArg(D, Args, CmdArgs);
}
// FIXME: Move to target hook.
@@ -1366,6 +1402,7 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
return false;
case llvm::Triple::hexagon:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64le:
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
@@ -1582,6 +1619,7 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
break;
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
AddPPCTargetArgs(Args, CmdArgs);
@@ -1715,6 +1753,21 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
if (IndirectBranches)
CmdArgs.push_back("-mbranch-target-enforce");
}
+
+ // Handle -msve-vector-bits=<bits>.
+ if (Arg *A = Args.getLastArg(options::OPT_msve_vector_bits_EQ)) {
+ StringRef Val = A->getValue();
+ const Driver &D = getToolChain().getDriver();
+ if (Val.equals("128") || Val.equals("256") || Val.equals("512") ||
+ Val.equals("1024") || Val.equals("2048"))
+ CmdArgs.push_back(
+ Args.MakeArgString(llvm::Twine("-msve-vector-bits=") + Val));
+ // Silently drop requests for vector-length agnostic code as it's implied.
+ else if (!Val.equals("scalable"))
+ // Diagnose unsupported values passed to -msve-vector-bits.
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Val;
+ }
}
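Editor's note: the -msve-vector-bits handling above has three outcomes: a fixed power-of-two width is forwarded, "scalable" is silently dropped (vector-length-agnostic code is already the default), and anything else is an error. A sketch of that classification (classifySVEBits is a hypothetical name):

  #include <iostream>
  #include <string_view>

  enum class SVEBits { Forward, Drop, Error };

  // Classify an -msve-vector-bits= value as in the hunk above.
  static SVEBits classifySVEBits(std::string_view Val) {
    for (std::string_view Ok : {"128", "256", "512", "1024", "2048"})
      if (Val == Ok)
        return SVEBits::Forward;
    return Val == "scalable" ? SVEBits::Drop : SVEBits::Error;
  }

  int main() {
    std::cout << (classifySVEBits("512") == SVEBits::Forward) << '\n';   // 1
    std::cout << (classifySVEBits("scalable") == SVEBits::Drop) << '\n'; // 1
    std::cout << (classifySVEBits("333") == SVEBits::Error) << '\n';     // 1
  }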
void Clang::AddMIPSTargetArgs(const ArgList &Args,
@@ -1868,19 +1921,8 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
if (T.isOSBinFormatELF()) {
switch (getToolChain().getArch()) {
case llvm::Triple::ppc64: {
- // When targeting a processor that supports QPX, or if QPX is
- // specifically enabled, default to using the ABI that supports QPX (so
- // long as it is not specifically disabled).
- bool HasQPX = false;
- if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- HasQPX = A->getValue() == StringRef("a2q");
- HasQPX = Args.hasFlag(options::OPT_mqpx, options::OPT_mno_qpx, HasQPX);
- if (HasQPX) {
- ABIName = "elfv1-qpx";
- break;
- }
-
- if (T.isMusl() || (T.isOSFreeBSD() && T.getOSMajorVersion() >= 13))
+ if ((T.isOSFreeBSD() && T.getOSMajorVersion() >= 13) ||
+ T.isOSOpenBSD() || T.isMusl())
ABIName = "elfv2";
else
ABIName = "elfv1";
@@ -1970,6 +2012,20 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
CmdArgs.push_back(ABIName.data());
SetRISCVSmallDataLimit(getToolChain(), Args, CmdArgs);
+
+ std::string TuneCPU;
+
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ)) {
+ StringRef Name = A->getValue();
+
+ Name = llvm::RISCV::resolveTuneCPUAlias(Name, Triple.isArch64Bit());
+ TuneCPU = std::string(Name);
+ }
+
+ if (!TuneCPU.empty()) {
+ CmdArgs.push_back("-tune-cpu");
+ CmdArgs.push_back(Args.MakeArgString(TuneCPU));
+ }
}
void Clang::AddSparcTargetArgs(const ArgList &Args,
@@ -2064,6 +2120,31 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
CmdArgs.push_back("soft");
CmdArgs.push_back("-mstack-alignment=4");
}
+
+ // Handle -mtune.
+
+ // Default to "generic" unless -march is present or targeting the PS4.
+ std::string TuneCPU;
+ if (!Args.hasArg(clang::driver::options::OPT_march_EQ) &&
+ !getToolChain().getTriple().isPS4CPU())
+ TuneCPU = "generic";
+
+ // Override based on -mtune.
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ)) {
+ StringRef Name = A->getValue();
+
+ if (Name == "native") {
+ Name = llvm::sys::getHostCPUName();
+ if (!Name.empty())
+ TuneCPU = std::string(Name);
+ } else
+ TuneCPU = std::string(Name);
+ }
+
+ if (!TuneCPU.empty()) {
+ CmdArgs.push_back("-tune-cpu");
+ CmdArgs.push_back(Args.MakeArgString(TuneCPU));
+ }
}
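Editor's note: the tune-CPU resolution above is a precedence chain: "generic" unless -march or PS4 decides otherwise, then -mtune overrides, with "native" resolved through host-CPU detection. A sketch under those assumptions; resolveTuneCPU and getHostCPU are hypothetical names, getHostCPU standing in for llvm::sys::getHostCPUName():

  #include <string>

  static std::string resolveTuneCPU(bool HasMArch, bool IsPS4,
                                    const std::string *MTune,
                                    std::string (*getHostCPU)()) {
    std::string TuneCPU;
    if (!HasMArch && !IsPS4)
      TuneCPU = "generic"; // default when nothing else decides
    if (MTune) {
      if (*MTune == "native") {
        std::string Host = getHostCPU();
        if (!Host.empty())
          TuneCPU = Host; // keep the earlier value if detection fails
      } else {
        TuneCPU = *MTune;
      }
    }
    return TuneCPU; // empty means: emit no -tune-cpu
  }

  int main() {
    auto Host = +[] { return std::string("znver2"); }; // assumed host CPU
    return resolveTuneCPU(false, false, nullptr, Host) == "generic" ? 0 : 1;
  }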
void Clang::AddHexagonTargetArgs(const ArgList &Args,
@@ -2380,7 +2461,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back(Value.data());
} else {
RenderDebugEnablingArgs(Args, CmdArgs,
- codegenoptions::DebugInfoConstructor,
+ codegenoptions::LimitedDebugInfo,
DwarfVersion, llvm::DebuggerKind::Default);
}
} else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
@@ -2766,9 +2847,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (TrappingMath) {
// FP Exception Behavior is also set to strict
assert(FPExceptionBehavior.equals("strict"));
- CmdArgs.push_back("-ftrapping-math");
- } else if (TrappingMathPresent)
- CmdArgs.push_back("-fno-trapping-math");
+ }
// The default is IEEE.
if (DenormalFPMath != llvm::DenormalMode::getIEEE()) {
@@ -2911,8 +2990,9 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
Args.AddAllArgValues(CmdArgs, options::OPT_Xanalyzer);
}
-static void RenderSSPOptions(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs, bool KernelOrKext) {
+static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ bool KernelOrKext) {
const llvm::Triple &EffectiveTriple = TC.getEffectiveTriple();
// NVPTX doesn't support stack protectors; from the compiler's perspective, it
@@ -2921,8 +3001,8 @@ static void RenderSSPOptions(const ToolChain &TC, const ArgList &Args,
return;
// -stack-protector=0 is default.
- unsigned StackProtectorLevel = 0;
- unsigned DefaultStackProtectorLevel =
+ LangOptions::StackProtectorMode StackProtectorLevel = LangOptions::SSPOff;
+ LangOptions::StackProtectorMode DefaultStackProtectorLevel =
TC.GetDefaultStackProtectorLevel(KernelOrKext);
if (Arg *A = Args.getLastArg(options::OPT_fno_stack_protector,
@@ -2931,7 +3011,7 @@ static void RenderSSPOptions(const ToolChain &TC, const ArgList &Args,
options::OPT_fstack_protector)) {
if (A->getOption().matches(options::OPT_fstack_protector))
StackProtectorLevel =
- std::max<unsigned>(LangOptions::SSPOn, DefaultStackProtectorLevel);
+ std::max<>(LangOptions::SSPOn, DefaultStackProtectorLevel);
else if (A->getOption().matches(options::OPT_fstack_protector_strong))
StackProtectorLevel = LangOptions::SSPStrong;
else if (A->getOption().matches(options::OPT_fstack_protector_all))
@@ -2957,6 +3037,50 @@ static void RenderSSPOptions(const ToolChain &TC, const ArgList &Args,
A->claim();
}
}
+
+ // Initially, support "tls" and "global" for the X86 target only.
+ // TODO: Support "sysreg" for AArch64.
+ const std::string &TripleStr = EffectiveTriple.getTriple();
+ if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_EQ)) {
+ StringRef Value = A->getValue();
+ if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ if (Value != "tls" && Value != "global") {
+ D.Diag(diag::err_drv_invalid_value_with_suggestion)
+ << A->getOption().getName() << Value
+ << "valid arguments to '-mstack-protector-guard=' are:tls global";
+ return;
+ }
+ A->render(Args, CmdArgs);
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_offset_EQ)) {
+ StringRef Value = A->getValue();
+ if (!EffectiveTriple.isX86())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ unsigned Offset;
+ if (Value.getAsInteger(10, Offset)) {
+ D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Value;
+ return;
+ }
+ A->render(Args, CmdArgs);
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_reg_EQ)) {
+ StringRef Value = A->getValue();
+ if (!EffectiveTriple.isX86())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ if (EffectiveTriple.isX86() && (Value != "fs" && Value != "gs")) {
+ D.Diag(diag::err_drv_invalid_value_with_suggestion)
+ << A->getOption().getName() << Value
+ << "for X86, valid arguments to '-mstack-protector-guard-reg=' are:fs gs";
+ return;
+ }
+ A->render(Args, CmdArgs);
+ }
}
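Editor's note: the three -mstack-protector-guard* checks above reduce to simple value validation once the per-target diagnostics are set aside. A sketch of just that part; all helper names are hypothetical, and the digit loop approximates StringRef::getAsInteger(10, ...):

  #include <string>

  static bool isValidGuard(const std::string &V) {
    return V == "tls" || V == "global";
  }
  static bool isValidGuardReg(const std::string &V) { // X86 only
    return V == "fs" || V == "gs";
  }
  static bool isValidGuardOffset(const std::string &V) {
    if (V.empty())
      return false;
    for (char C : V)
      if (C < '0' || C > '9')
        return false; // non-decimal input is rejected
    return true;
  }

  int main() {
    return (isValidGuard("tls") && isValidGuardReg("gs") &&
            isValidGuardOffset("16") && !isValidGuardOffset("0x10"))
               ? 0
               : 1;
  }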
static void RenderSCPOptions(const ToolChain &TC, const ArgList &Args,
@@ -2971,7 +3095,7 @@ static void RenderSCPOptions(const ToolChain &TC, const ArgList &Args,
return;
if (Args.hasFlag(options::OPT_fstack_clash_protection,
- options::OPT_fnostack_clash_protection, false))
+ options::OPT_fno_stack_clash_protection, false))
CmdArgs.push_back("-fstack-clash-protection");
}
@@ -3072,13 +3196,13 @@ static void RenderARCMigrateToolOptions(const Driver &D, const ArgList &Args,
switch (A->getOption().getID()) {
default: llvm_unreachable("missed a case");
case options::OPT_ccc_arcmt_check:
- CmdArgs.push_back("-arcmt-check");
+ CmdArgs.push_back("-arcmt-action=check");
break;
case options::OPT_ccc_arcmt_modify:
- CmdArgs.push_back("-arcmt-modify");
+ CmdArgs.push_back("-arcmt-action=modify");
break;
case options::OPT_ccc_arcmt_migrate:
- CmdArgs.push_back("-arcmt-migrate");
+ CmdArgs.push_back("-arcmt-action=migrate");
CmdArgs.push_back("-mt-migrate-directory");
CmdArgs.push_back(A->getValue());
@@ -3261,6 +3385,9 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
std::string("-fprebuilt-module-path=") + A->getValue()));
A->claim();
}
+ if (Args.hasFlag(options::OPT_fprebuilt_implicit_modules,
+ options::OPT_fno_prebuilt_implicit_modules, false))
+ CmdArgs.push_back("-fprebuilt-implicit-modules");
if (Args.hasFlag(options::OPT_fmodules_validate_input_files_content,
options::OPT_fno_modules_validate_input_files_content,
false))
@@ -3376,8 +3503,8 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
} else {
bool IsARM = T.isARM() || T.isThumb() || T.isAArch64();
CmdArgs.push_back("-fwchar-type=int");
- if (IsARM && !(T.isOSWindows() || T.isOSNetBSD() ||
- T.isOSOpenBSD()))
+ if (T.isOSzOS() ||
+ (IsARM && !(T.isOSWindows() || T.isOSNetBSD() || T.isOSOpenBSD())))
CmdArgs.push_back("-fno-signed-wchar");
else
CmdArgs.push_back("-fsigned-wchar");
@@ -3594,9 +3721,9 @@ enum class DwarfFissionKind { None, Split, Single };
static DwarfFissionKind getDebugFissionKind(const Driver &D,
const ArgList &Args, Arg *&Arg) {
- Arg =
- Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ);
- if (!Arg)
+ Arg = Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ,
+ options::OPT_gno_split_dwarf);
+ if (!Arg || Arg->getOption().matches(options::OPT_gno_split_dwarf))
return DwarfFissionKind::None;
if (Arg->getOption().matches(options::OPT_gsplit_dwarf))
@@ -3613,9 +3740,10 @@ static DwarfFissionKind getDebugFissionKind(const Driver &D,
return DwarfFissionKind::None;
}
-static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
+static void renderDebugOptions(const ToolChain &TC, const Driver &D,
const llvm::Triple &T, const ArgList &Args,
- bool EmitCodeView, ArgStringList &CmdArgs,
+ bool EmitCodeView, bool IRInput,
+ ArgStringList &CmdArgs,
codegenoptions::DebugInfoKind &DebugInfoKind,
DwarfFissionKind &DwarfFission) {
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
@@ -3639,21 +3767,20 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
Args.hasFlag(options::OPT_fsplit_dwarf_inlining,
options::OPT_fno_split_dwarf_inlining, false);
- Args.ClaimAllArgs(options::OPT_g_Group);
-
- Arg* SplitDWARFArg;
- DwarfFission = getDebugFissionKind(D, Args, SplitDWARFArg);
-
- if (DwarfFission != DwarfFissionKind::None &&
- !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) {
- DwarfFission = DwarfFissionKind::None;
- SplitDWARFInlining = false;
+ // Normally -gsplit-dwarf is only useful with -gN. For IR input, Clang does
+ // object file generation without IR generation, so -gN should not be
+ // needed. Thus, allow -gsplit-dwarf with either -gN or IR input.
+ if (IRInput || Args.hasArg(options::OPT_g_Group)) {
+ Arg *SplitDWARFArg;
+ DwarfFission = getDebugFissionKind(D, Args, SplitDWARFArg);
+ if (DwarfFission != DwarfFissionKind::None &&
+ !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) {
+ DwarfFission = DwarfFissionKind::None;
+ SplitDWARFInlining = false;
+ }
}
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_g_Group, options::OPT_gsplit_dwarf,
- options::OPT_gsplit_dwarf_EQ)) {
- DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ DebugInfoKind = codegenoptions::LimitedDebugInfo;
// If the last option explicitly specified a debug-info level, use it.
if (checkDebugInfoOption(A, Args, D, TC) &&
@@ -3715,26 +3842,33 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
}
}
- unsigned DWARFVersion = 0;
+ unsigned RequestedDWARFVersion = 0; // DWARF version requested by the user
+ unsigned EffectiveDWARFVersion = 0; // DWARF version TC can generate. It may
+ // be lower than what the user wanted.
unsigned DefaultDWARFVersion = ParseDebugDefaultVersion(TC, Args);
if (EmitDwarf) {
// Start with the platform default DWARF version
- DWARFVersion = TC.GetDefaultDwarfVersion();
- assert(DWARFVersion && "toolchain default DWARF version must be nonzero");
+ RequestedDWARFVersion = TC.GetDefaultDwarfVersion();
+ assert(RequestedDWARFVersion &&
+ "toolchain default DWARF version must be nonzero");
// If the user specified a default DWARF version, that takes precedence
// over the platform default.
if (DefaultDWARFVersion)
- DWARFVersion = DefaultDWARFVersion;
+ RequestedDWARFVersion = DefaultDWARFVersion;
// Override with a user-specified DWARF version
if (GDwarfN)
if (auto ExplicitVersion = DwarfVersionNum(GDwarfN->getSpelling()))
- DWARFVersion = ExplicitVersion;
+ RequestedDWARFVersion = ExplicitVersion;
+ // Clamp effective DWARF version to the max supported by the toolchain.
+ EffectiveDWARFVersion =
+ std::min(RequestedDWARFVersion, TC.getMaxDwarfVersion());
}
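Editor's note: the DWARF version logic above is a precedence chain followed by a clamp: toolchain default, then -fdebug-default-version, then an explicit -gdwarf-N, finally limited to what the toolchain can emit. A sketch (effectiveDwarfVersion is a hypothetical name; 0 means "not specified"):

  #include <algorithm>

  static unsigned effectiveDwarfVersion(unsigned ToolchainDefault,
                                        unsigned DebugDefaultVersion,
                                        unsigned ExplicitGDwarfN,
                                        unsigned ToolchainMax) {
    unsigned Requested = ToolchainDefault;    // platform default
    if (DebugDefaultVersion)
      Requested = DebugDefaultVersion;        // -fdebug-default-version=
    if (ExplicitGDwarfN)
      Requested = ExplicitGDwarfN;            // an explicit -gdwarf-N wins
    return std::min(Requested, ToolchainMax); // clamp to what TC can emit
  }

  int main() { return effectiveDwarfVersion(4, 0, 5, 4) == 4 ? 0 : 1; }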
// -gline-directives-only supported only for the DWARF debug info.
- if (DWARFVersion == 0 && DebugInfoKind == codegenoptions::DebugDirectivesOnly)
+ if (RequestedDWARFVersion == 0 &&
+ DebugInfoKind == codegenoptions::DebugDirectivesOnly)
DebugInfoKind = codegenoptions::NoDebugInfo;
// We ignore flag -gstrict-dwarf for now.
@@ -3758,7 +3892,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (checkDebugInfoOption(A, Args, D, TC)) {
if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
DebugInfoKind != codegenoptions::DebugDirectivesOnly) {
- DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ DebugInfoKind = codegenoptions::LimitedDebugInfo;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
}
@@ -3778,10 +3912,14 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
TC.GetDefaultStandaloneDebug());
if (const Arg *A = Args.getLastArg(options::OPT_fstandalone_debug))
(void)checkDebugInfoOption(A, Args, D, TC);
- if ((DebugInfoKind == codegenoptions::LimitedDebugInfo ||
- DebugInfoKind == codegenoptions::DebugInfoConstructor) &&
- NeedFullDebug)
- DebugInfoKind = codegenoptions::FullDebugInfo;
+
+ if (DebugInfoKind == codegenoptions::LimitedDebugInfo) {
+ if (Args.hasFlag(options::OPT_fno_eliminate_unused_debug_types,
+ options::OPT_feliminate_unused_debug_types, false))
+ DebugInfoKind = codegenoptions::UnusedTypeInfo;
+ else if (NeedFullDebug)
+ DebugInfoKind = codegenoptions::FullDebugInfo;
+ }
if (Args.hasFlag(options::OPT_gembed_source, options::OPT_gno_embed_source,
false)) {
@@ -3790,9 +3928,15 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// fallen back to the target default, so if this is still not at least 5
// we emit an error.
const Arg *A = Args.getLastArg(options::OPT_gembed_source);
- if (DWARFVersion < 5)
+ if (RequestedDWARFVersion < 5)
D.Diag(diag::err_drv_argument_only_allowed_with)
<< A->getAsString(Args) << "-gdwarf-5";
+ else if (EffectiveDWARFVersion < 5)
+ // The toolchain has reduced the allowed DWARF version, so we can't
+ // enable -gembed-source.
+ D.Diag(diag::warn_drv_dwarf_version_limited_by_target)
+ << A->getAsString(Args) << TC.getTripleString() << 5
+ << EffectiveDWARFVersion;
else if (checkDebugInfoOption(A, Args, D, TC))
CmdArgs.push_back("-gembed-source");
}
@@ -3813,15 +3957,15 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-gno-inline-line-tables");
}
- // Adjust the debug info kind for the given toolchain.
- TC.adjustDebugInfoKind(DebugInfoKind, Args);
-
// When emitting remarks, we need at least debug lines in the output.
if (willEmitRemarks(Args) &&
DebugInfoKind <= codegenoptions::DebugDirectivesOnly)
DebugInfoKind = codegenoptions::DebugLineTablesOnly;
- RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DWARFVersion,
+ // Adjust the debug info kind for the given toolchain.
+ TC.adjustDebugInfoKind(DebugInfoKind, Args);
+
+ RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, EffectiveDWARFVersion,
DebuggerTuning);
// -fdebug-macro turns on macro debug info generation.
@@ -3867,7 +4011,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (Args.hasFlag(options::OPT_fdebug_types_section,
options::OPT_fno_debug_types_section, false)) {
- if (!T.isOSBinFormatELF()) {
+ if (!(T.isOSBinFormatELF() || T.isOSBinFormatWasm())) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< Args.getLastArg(options::OPT_fdebug_types_section)
->getAsString(Args)
@@ -3890,6 +4034,25 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (DebuggerTuning == llvm::DebuggerKind::SCE)
CmdArgs.push_back("-dwarf-explicit-import");
+ auto *DwarfFormatArg =
+ Args.getLastArg(options::OPT_gdwarf64, options::OPT_gdwarf32);
+ if (DwarfFormatArg &&
+ DwarfFormatArg->getOption().matches(options::OPT_gdwarf64)) {
+ const llvm::Triple &RawTriple = TC.getTriple();
+ if (EffectiveDWARFVersion < 3)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << DwarfFormatArg->getAsString(Args) << "DWARFv3 or greater";
+ else if (!RawTriple.isArch64Bit())
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << DwarfFormatArg->getAsString(Args) << "64-bit architecture";
+ else if (!RawTriple.isOSBinFormatELF())
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << DwarfFormatArg->getAsString(Args) << "ELF platforms";
+ }
+
+ if (DwarfFormatArg)
+ DwarfFormatArg->render(Args, CmdArgs);
+
RenderDebugInfoCompressionArgs(Args, CmdArgs, D, TC);
}
@@ -4187,6 +4350,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.getLastArg(options::OPT_save_temps_EQ))
Args.AddLastArg(CmdArgs, options::OPT_save_temps_EQ);
+ auto *MemProfArg = Args.getLastArg(options::OPT_fmemory_profile,
+ options::OPT_fmemory_profile_EQ,
+ options::OPT_fno_memory_profile);
+ if (MemProfArg &&
+ !MemProfArg->getOption().matches(options::OPT_fno_memory_profile))
+ MemProfArg->render(Args, CmdArgs);
+
// Embed-bitcode option.
// Only white-listed flags below are allowed to be embedded.
if (C.getDriver().embedBitcodeInObject() && !C.getDriver().isUsingLTO() &&
@@ -4300,9 +4470,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
II.getInputArg().renderAsInput(Args, CmdArgs);
}
- C.addCommand(
- std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileUTF8(),
- D.getClangProgramPath(), CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), D.getClangProgramPath(),
+ CmdArgs, Inputs, Output));
return;
}
@@ -4431,12 +4601,31 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
- // The default is -fno-semantic-interposition. We render it just because we
- // require explicit -fno-semantic-interposition to infer dso_local.
- if (Arg *A = Args.getLastArg(options::OPT_fsemantic_interposition,
- options::OPT_fno_semantic_interposition))
- if (RelocationModel != llvm::Reloc::Static && !IsPIE)
- A->render(Args, CmdArgs);
+ // -fsemantic-interposition is forwarded to CC1: set the
+ // "SemanticInterposition" metadata to 1 (make some linkages interposable) and
+ // make default visibility external linkage definitions dso_preemptable.
+ //
+ // -fno-semantic-interposition: if the target supports .Lfoo$local local
+ // aliases (make default visibility external linkage definitions dso_local).
+ // This is the CC1 default for ELF to match COFF/Mach-O.
+ //
+ // Otherwise use Clang's traditional behavior: like
+ // -fno-semantic-interposition, but local aliases are not used, so
+ // references can still be interposed if not optimized out.
+ if (Triple.isOSBinFormatELF()) {
+ Arg *A = Args.getLastArg(options::OPT_fsemantic_interposition,
+ options::OPT_fno_semantic_interposition);
+ if (RelocationModel != llvm::Reloc::Static && !IsPIE) {
+ // The supported targets need to call AsmPrinter::getSymbolPreferLocal.
+ bool SupportsLocalAlias = Triple.isX86();
+ if (!A)
+ CmdArgs.push_back("-fhalf-no-semantic-interposition");
+ else if (A->getOption().matches(options::OPT_fsemantic_interposition))
+ A->render(Args, CmdArgs);
+ else if (!SupportsLocalAlias)
+ CmdArgs.push_back("-fhalf-no-semantic-interposition");
+ }
+ }
{
std::string Model;
@@ -4480,6 +4669,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (Triple.isOSAIX() && Args.hasArg(options::OPT_maltivec)) {
+ if (Args.getLastArg(options::OPT_mabi_EQ_vec_extabi)) {
+ CmdArgs.push_back("-mabi=vec-extabi");
+ } else {
+ D.Diag(diag::err_aix_default_altivec_abi);
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ_vec_extabi,
+ options::OPT_mabi_EQ_vec_default)) {
+ if (!Triple.isOSAIX())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << RawTriple.str();
+ if (A->getOption().getID() == options::OPT_mabi_EQ_vec_default)
+ D.Diag(diag::err_aix_default_altivec_abi);
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_Wframe_larger_than_EQ)) {
StringRef v = A->getValue();
CmdArgs.push_back("-mllvm");
@@ -4506,7 +4712,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_maix_struct_return,
options::OPT_msvr4_struct_return)) {
- if (TC.getArch() != llvm::Triple::ppc) {
+ if (!TC.getTriple().isPPC32()) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << RawTriple.str();
} else if (A->getOption().matches(options::OPT_maix_struct_return)) {
@@ -4619,7 +4825,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_LongDouble_Group)) {
if (TC.getTriple().isX86())
A->render(Args, CmdArgs);
- else if ((TC.getArch() == llvm::Triple::ppc || TC.getTriple().isPPC64()) &&
+ else if (TC.getTriple().isPPC() &&
(A->getOption().getID() != options::OPT_mlong_double_80))
A->render(Args, CmdArgs);
else
@@ -4634,6 +4840,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
IsIntegratedAssemblerDefault))
CmdArgs.push_back("-fno-verbose-asm");
+ // Parse 'none' or '$major.$minor'. Disallow -fbinutils-version=0 because we
+ // use that to indicate the MC default in the backend.
+ if (Arg *A = Args.getLastArg(options::OPT_fbinutils_version_EQ)) {
+ StringRef V = A->getValue();
+ unsigned Num;
+ if (V == "none")
+ A->render(Args, CmdArgs);
+ else if (!V.consumeInteger(10, Num) && Num > 0 &&
+ (V.empty() || (V.consume_front(".") &&
+ !V.consumeInteger(10, Num) && V.empty())))
+ A->render(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << A->getValue() << A->getOption().getName();
+ }
+
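Editor's note: the parsing above accepts exactly "none", "<major>" with major > 0, or "<major>.<minor>", rejecting trailing garbage. A standalone sketch of the same grammar; eatNumber and isValidBinutilsVersion are hypothetical names, and overflow handling is omitted:

  #include <cctype>
  #include <string_view>

  // Consume a run of decimal digits from the front of S into Num.
  static bool eatNumber(std::string_view &S, unsigned &Num) {
    size_t I = 0;
    Num = 0;
    while (I < S.size() && std::isdigit(static_cast<unsigned char>(S[I])))
      Num = Num * 10 + (S[I++] - '0');
    if (I == 0)
      return false; // no digits consumed
    S.remove_prefix(I);
    return true;
  }

  static bool isValidBinutilsVersion(std::string_view V) {
    if (V == "none")
      return true;
    unsigned Num;
    if (!eatNumber(V, Num) || Num == 0)
      return false;          // major must be a positive integer
    if (V.empty())
      return true;           // "<major>"
    if (V.front() != '.')
      return false;
    V.remove_prefix(1);
    return eatNumber(V, Num) && V.empty(); // "<major>.<minor>"
  }

  int main() { return isValidBinutilsVersion("2.35") ? 0 : 1; }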
if (!TC.useIntegratedAs())
CmdArgs.push_back("-no-integrated-as");
@@ -4662,11 +4884,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mms-bitfields");
}
- if (Args.hasFlag(options::OPT_mpie_copy_relocations,
- options::OPT_mno_pie_copy_relocations,
- false)) {
- CmdArgs.push_back("-mpie-copy-relocations");
- }
+ // Non-PIC code defaults to -fdirect-access-external-data while PIC code
+ // defaults to -fno-direct-access-external-data. Pass the option if different
+ // from the default.
+ if (Arg *A = Args.getLastArg(options::OPT_fdirect_access_external_data,
+ options::OPT_fno_direct_access_external_data))
+ if (A->getOption().matches(options::OPT_fdirect_access_external_data) !=
+ (PICLevel == 0))
+ A->render(Args, CmdArgs);
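Editor's note: the inequality in the condition above means "forward the flag only when the request differs from the implicit default", where non-PIC code defaults to direct access and PIC code to indirect. A sketch (shouldForward is a hypothetical name):

  // Forward -f[no-]direct-access-external-data only when the request
  // differs from the implicit default (direct access iff non-PIC).
  static bool shouldForward(bool WantsDirect, unsigned PICLevel) {
    bool DefaultIsDirect = (PICLevel == 0);
    return WantsDirect != DefaultIsDirect;
  }

  int main() { return shouldForward(/*WantsDirect=*/true, /*PICLevel=*/2) ? 0 : 1; }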
if (Args.hasFlag(options::OPT_fno_plt, options::OPT_fplt, false)) {
CmdArgs.push_back("-fno-plt");
@@ -4757,18 +4982,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (D.IsCLMode())
AddClangCLArgs(Args, InputType, CmdArgs, &DebugInfoKind, &EmitCodeView);
- DwarfFissionKind DwarfFission;
- RenderDebugOptions(TC, D, RawTriple, Args, EmitCodeView, CmdArgs,
- DebugInfoKind, DwarfFission);
+ DwarfFissionKind DwarfFission = DwarfFissionKind::None;
+ renderDebugOptions(TC, D, RawTriple, Args, EmitCodeView,
+ types::isLLVMIR(InputType), CmdArgs, DebugInfoKind,
+ DwarfFission);
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
bool SplitDWARF = (DwarfFission != DwarfFissionKind::None) &&
- TC.getTriple().isOSBinFormatELF() &&
+ (TC.getTriple().isOSBinFormatELF() ||
+ TC.getTriple().isOSBinFormatWasm()) &&
(isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
isa<BackendJobAction>(JA));
if (SplitDWARF) {
- const char *SplitDWARFOut = SplitDebugName(Args, Input, Output);
+ const char *SplitDWARFOut = SplitDebugName(JA, Args, Input, Output);
CmdArgs.push_back("-split-dwarf-file");
CmdArgs.push_back(SplitDWARFOut);
if (DwarfFission == DwarfFissionKind::Split) {
@@ -4839,17 +5066,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Arg *A = Args.getLastArg(options::OPT_fbasic_block_sections_EQ)) {
- StringRef Val = A->getValue();
- if (Val != "all" && Val != "labels" && Val != "none" &&
- !(Val.startswith("list=") && llvm::sys::fs::exists(Val.substr(5))))
- D.Diag(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue();
- else
- A->render(Args, CmdArgs);
+ if (Triple.isX86() && Triple.isOSBinFormatELF()) {
+ StringRef Val = A->getValue();
+ if (Val != "all" && Val != "labels" && Val != "none" &&
+ !Val.startswith("list="))
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ A->render(Args, CmdArgs);
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
}
+ bool HasDefaultDataSections = Triple.isOSBinFormatXCOFF();
if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
- UseSeparateSections)) {
+ UseSeparateSections || HasDefaultDataSections)) {
CmdArgs.push_back("-fdata-sections");
}
@@ -4865,6 +5098,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_unique_basic_block_section_names, false))
CmdArgs.push_back("-funique-basic-block-section-names");
+ if (Arg *A = Args.getLastArg(options::OPT_fsplit_machine_functions,
+ options::OPT_fno_split_machine_functions)) {
+ // This codegen pass is only available on x86-elf targets.
+ if (Triple.isX86() && Triple.isOSBinFormatELF()) {
+ if (A->getOption().matches(options::OPT_fsplit_machine_functions))
+ A->render(Args, CmdArgs);
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
+ }
+
Args.AddLastArg(CmdArgs, options::OPT_finstrument_functions,
options::OPT_finstrument_functions_after_inlining,
options::OPT_finstrument_function_entry_bare);
@@ -5169,7 +5414,32 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (!RawTriple.isPS4())
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fvisibility_from_dllstorageclass,
+ options::OPT_fno_visibility_from_dllstorageclass)) {
+ if (A->getOption().matches(
+ options::OPT_fvisibility_from_dllstorageclass)) {
+ CmdArgs.push_back("-fvisibility-from-dllstorageclass");
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_dllexport_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_nodllstorageclass_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_externs_dllimport_EQ);
+ Args.AddLastArg(CmdArgs,
+ options::OPT_fvisibility_externs_nodllstorageclass_EQ);
+ }
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_mignore_xcoff_visibility)) {
+ if (Triple.isOSAIX())
+ CmdArgs.push_back("-mignore-xcoff-visibility");
+ else
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
+
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden_static_local_var,
+ options::OPT_fno_visibility_inlines_hidden_static_local_var);
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_global_new_delete_hidden);
Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
@@ -5261,6 +5531,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const XRayArgs &XRay = TC.getXRayArgs();
XRay.addArgs(TC, Args, CmdArgs, InputType);
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fprofile_list_EQ)) {
+ if (D.getVFS().exists(Filename))
+ CmdArgs.push_back(Args.MakeArgString("-fprofile-list=" + Filename));
+ else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ)) {
StringRef S0 = A->getValue(), S = S0;
unsigned Size, Offset = 0;
@@ -5318,6 +5596,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_report_EQ);
Args.AddLastArg(CmdArgs, options::OPT_ftime_trace);
Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_granularity_EQ);
Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
@@ -5357,7 +5636,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_mno_speculative_load_hardening, false))
CmdArgs.push_back(Args.MakeArgString("-mspeculative-load-hardening"));
- RenderSSPOptions(TC, Args, CmdArgs, KernelOrKext);
+ RenderSSPOptions(D, TC, Args, CmdArgs, KernelOrKext);
RenderSCPOptions(TC, Args, CmdArgs);
RenderTrivialAutoVarInitOptions(D, TC, Args, CmdArgs);
@@ -5404,9 +5683,26 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -cl options to -cc1
RenderOpenCLOptions(Args, CmdArgs);
- if (IsHIP && Args.hasFlag(options::OPT_fhip_new_launch_api,
- options::OPT_fno_hip_new_launch_api, true))
- CmdArgs.push_back("-fhip-new-launch-api");
+ if (IsHIP) {
+ if (Args.hasFlag(options::OPT_fhip_new_launch_api,
+ options::OPT_fno_hip_new_launch_api, true))
+ CmdArgs.push_back("-fhip-new-launch-api");
+ if (Args.hasFlag(options::OPT_fgpu_allow_device_init,
+ options::OPT_fno_gpu_allow_device_init, false))
+ CmdArgs.push_back("-fgpu-allow-device-init");
+ }
+
+ if (IsCuda || IsHIP) {
+ if (Args.hasFlag(options::OPT_fgpu_defer_diag,
+ options::OPT_fno_gpu_defer_diag, false))
+ CmdArgs.push_back("-fgpu-defer-diag");
+ if (Args.hasFlag(options::OPT_fgpu_exclude_wrong_side_overloads,
+ options::OPT_fno_gpu_exclude_wrong_side_overloads,
+ false)) {
+ CmdArgs.push_back("-fgpu-exclude-wrong-side-overloads");
+ CmdArgs.push_back("-fgpu-defer-diag");
+ }
+ }
if (Arg *A = Args.getLastArg(options::OPT_fcf_protection_EQ)) {
CmdArgs.push_back(
@@ -5432,6 +5728,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddLastArg(CmdArgs, options::OPT_fprofile_remapping_file_EQ);
+ if (Args.hasFlag(options::OPT_fpseudo_probe_for_profiling,
+ options::OPT_fno_pseudo_probe_for_profiling, false))
+ CmdArgs.push_back("-fpseudo-probe-for-profiling");
+
RenderBuiltinOptions(TC, RawTriple, Args, CmdArgs);
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
@@ -5479,9 +5779,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
(RTTIMode == ToolChain::RM_Disabled)))
CmdArgs.push_back("-fno-rtti");
- // -fshort-enums=0 is default for all architectures except Hexagon.
+ // -fshort-enums=0 is default for all architectures except Hexagon and z/OS.
if (Args.hasFlag(options::OPT_fshort_enums, options::OPT_fno_short_enums,
- TC.getArch() == llvm::Triple::hexagon))
+ TC.getArch() == llvm::Triple::hexagon || Triple.isOSzOS()))
CmdArgs.push_back("-fshort-enums");
RenderCharacterOptions(Args, AuxTriple ? *AuxTriple : RawTriple, CmdArgs);
@@ -5555,7 +5855,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
LanguageStandard = llvm::StringSwitch<StringRef>(StdArg->getValue())
.Case("c++14", "-std=c++14")
.Case("c++17", "-std=c++17")
- .Case("c++latest", "-std=c++2a")
+ .Case("c++latest", "-std=c++20")
.Default("");
if (LanguageStandard.empty())
D.Diag(clang::diag::warn_drv_unused_argument)
@@ -5618,7 +5918,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// FIXME: Find a better way to determine whether the language has modules
// support by default, or just assume that all languages do.
bool HaveModules =
- Std && (Std->containsValue("c++2a") || Std->containsValue("c++latest"));
+ Std && (Std->containsValue("c++2a") || Std->containsValue("c++20") ||
+ Std->containsValue("c++latest"));
RenderModulesOptions(C, D, Args, Input, Output, CmdArgs, HaveModules);
if (Args.hasFlag(options::OPT_fpch_validate_input_files_content,
@@ -5627,9 +5928,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fpch_instantiate_templates,
options::OPT_fno_pch_instantiate_templates, false))
CmdArgs.push_back("-fpch-instantiate-templates");
+ if (Args.hasFlag(options::OPT_fpch_codegen, options::OPT_fno_pch_codegen,
+ false))
+ CmdArgs.push_back("-fmodules-codegen");
+ if (Args.hasFlag(options::OPT_fpch_debuginfo, options::OPT_fno_pch_debuginfo,
+ false))
+ CmdArgs.push_back("-fmodules-debuginfo");
- Args.AddLastArg(CmdArgs, options::OPT_fexperimental_new_pass_manager,
- options::OPT_fno_experimental_new_pass_manager);
+ Args.AddLastArg(CmdArgs, options::OPT_flegacy_pass_manager,
+ options::OPT_fno_legacy_pass_manager);
ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, Inputs, CmdArgs, rewriteKind);
RenderObjCOptions(TC, D, RawTriple, Args, Runtime, rewriteKind != RK_None,
@@ -5650,25 +5957,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (A) {
const Option &Opt = A->getOption();
if (Opt.matches(options::OPT_fsjlj_exceptions))
- CmdArgs.push_back("-fsjlj-exceptions");
+ CmdArgs.push_back("-exception-model=sjlj");
if (Opt.matches(options::OPT_fseh_exceptions))
- CmdArgs.push_back("-fseh-exceptions");
+ CmdArgs.push_back("-exception-model=seh");
if (Opt.matches(options::OPT_fdwarf_exceptions))
- CmdArgs.push_back("-fdwarf-exceptions");
+ CmdArgs.push_back("-exception-model=dwarf");
if (Opt.matches(options::OPT_fwasm_exceptions))
- CmdArgs.push_back("-fwasm-exceptions");
+ CmdArgs.push_back("-exception-model=wasm");
} else {
switch (TC.GetExceptionModel(Args)) {
default:
break;
case llvm::ExceptionHandling::DwarfCFI:
- CmdArgs.push_back("-fdwarf-exceptions");
+ CmdArgs.push_back("-exception-model=dwarf");
break;
case llvm::ExceptionHandling::SjLj:
- CmdArgs.push_back("-fsjlj-exceptions");
+ CmdArgs.push_back("-exception-model=sjlj");
break;
case llvm::ExceptionHandling::WinEH:
- CmdArgs.push_back("-fseh-exceptions");
+ CmdArgs.push_back("-exception-model=seh");
break;
}
}
@@ -5841,6 +6148,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_apple_pragma_pack, false))
CmdArgs.push_back("-fapple-pragma-pack");
+ if (Args.hasFlag(options::OPT_fxl_pragma_pack,
+ options::OPT_fno_xl_pragma_pack, RawTriple.isOSAIX()))
+ CmdArgs.push_back("-fxl-pragma-pack");
+
// Remarks can be enabled with any of the `-f.*optimization-record.*` flags.
if (willEmitRemarks(Args) && checkRemarksOptions(D, Args, Triple))
renderRemarksOptions(Args, CmdArgs, Triple, Input, Output, JA);
@@ -6022,6 +6333,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (Triple.isAMDGPU()) {
+ handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
+
+ if (Args.hasFlag(options::OPT_munsafe_fp_atomics,
+ options::OPT_mno_unsafe_fp_atomics, false))
+ CmdArgs.push_back("-munsafe-fp-atomics");
+ }
+
// For all the host OpenMP offloading compile jobs we need to pass the targets
// information using -fopenmp-targets= option.
if (JA.isHostOffloading(Action::OFK_OpenMP)) {
@@ -6145,35 +6464,32 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_cxx_static_destructors, true))
CmdArgs.push_back("-fno-c++-static-destructors");
- if (Arg *A = Args.getLastArg(options::OPT_moutline,
- options::OPT_mno_outline)) {
- if (A->getOption().matches(options::OPT_moutline)) {
- // We only support -moutline in AArch64 and ARM targets right now. If
- // we're not compiling for these, emit a warning and ignore the flag.
- // Otherwise, add the proper mllvm flags.
- if (!(Triple.isARM() || Triple.isThumb() ||
- Triple.getArch() == llvm::Triple::aarch64 ||
- Triple.getArch() == llvm::Triple::aarch64_32)) {
- D.Diag(diag::warn_drv_moutline_unsupported_opt) << Triple.getArchName();
+ addMachineOutlinerArgs(D, Args, CmdArgs, Triple, /*IsLTO=*/false);
+
+ if (Arg *A = Args.getLastArg(options::OPT_moutline_atomics,
+ options::OPT_mno_outline_atomics)) {
+ if (A->getOption().matches(options::OPT_moutline_atomics)) {
+ // Option -moutline-atomics supported for AArch64 target only.
+ if (!Triple.isAArch64()) {
+ D.Diag(diag::warn_drv_moutline_atomics_unsupported_opt)
+ << Triple.getArchName();
} else {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-enable-machine-outliner");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+outline-atomics");
}
} else {
- // Disable all outlining behaviour.
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-enable-machine-outliner=never");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-outline-atomics");
}
}
if (Args.hasFlag(options::OPT_faddrsig, options::OPT_fno_addrsig,
(TC.getTriple().isOSBinFormatELF() ||
TC.getTriple().isOSBinFormatCOFF()) &&
- !TC.getTriple().isPS4() &&
- !TC.getTriple().isOSNetBSD() &&
- !Distro(D.getVFS(), TC.getTriple()).IsGentoo() &&
- !TC.getTriple().isAndroid() &&
- TC.useIntegratedAs()))
+ !TC.getTriple().isPS4() && !TC.getTriple().isVE() &&
+ !TC.getTriple().isOSNetBSD() &&
+ !Distro(D.getVFS(), TC.getTriple()).IsGentoo() &&
+ !TC.getTriple().isAndroid() && TC.useIntegratedAs()))
CmdArgs.push_back("-faddrsig");
if (Arg *A = Args.getLastArg(options::OPT_fsymbol_partition_EQ)) {
@@ -6226,20 +6542,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput);
C.addCommand(std::make_unique<FallbackCommand>(
JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
- std::move(CLCommand)));
+ Output, std::move(CLCommand)));
} else if (Args.hasArg(options::OPT__SLASH_fallback) &&
isa<PrecompileJobAction>(JA)) {
// In /fallback builds, run the main compilation even if the pch generation
// fails, so that the main compilation's fallback to cl.exe runs.
C.addCommand(std::make_unique<ForceSuccessCommand>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output));
} else if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke the CC1 directly in this process
- C.addCommand(std::make_unique<CC1Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<CC1Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
} else {
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
}
// Make the compile command echo its inputs for /showFilenames.
@@ -6554,13 +6873,12 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back(Args.MakeArgString(Twine(LangOptions::SSPStrong)));
}
- // Emit CodeView if -Z7, -Zd, or -gline-tables-only are present.
- if (Arg *DebugInfoArg =
- Args.getLastArg(options::OPT__SLASH_Z7, options::OPT__SLASH_Zd,
- options::OPT_gline_tables_only)) {
+ // Emit CodeView if -Z7 or -gline-tables-only are present.
+ if (Arg *DebugInfoArg = Args.getLastArg(options::OPT__SLASH_Z7,
+ options::OPT_gline_tables_only)) {
*EmitCodeView = true;
if (DebugInfoArg->getOption().matches(options::OPT__SLASH_Z7))
- *DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ *DebugInfoKind = codegenoptions::LimitedDebugInfo;
else
*DebugInfoKind = codegenoptions::DebugLineTablesOnly;
} else {
@@ -6857,7 +7175,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// the guard for source type, however there is a test which asserts
// that some assembler invocation receives no -debug-info-kind,
// and it's not clear whether that test is just overly restrictive.
- DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
+ DebugInfoKind = (WantDebug ? codegenoptions::LimitedDebugInfo
: codegenoptions::NoDebugInfo);
// Add the -fdebug-compilation-dir flag if needed.
addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
@@ -6943,6 +7261,15 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ if (Args.hasArg(options::OPT_mmark_bti_property)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-aarch64-mark-bti-property");
+ }
+ break;
+
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
AddRISCVTargetArgs(Args, CmdArgs);
@@ -6970,15 +7297,26 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
if (getDebugFissionKind(D, Args, A) == DwarfFissionKind::Split &&
T.isOSBinFormatELF()) {
CmdArgs.push_back("-split-dwarf-output");
- CmdArgs.push_back(SplitDebugName(Args, Input, Output));
+ CmdArgs.push_back(SplitDebugName(JA, Args, Input, Output));
}
+ if (Triple.isAMDGPU())
+ handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
+
assert(Input.isFilename() && "Invalid input.");
CmdArgs.push_back(Input.getFilename());
const char *Exec = getToolChain().getDriver().getClangProgramPath();
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ if (D.CC1Main && !D.CCGenDiagnostics) {
+ // Invoke cc1as directly in this process.
+ C.addCommand(std::make_unique<CC1Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
+ } else {
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
+ }
}
// Begin OffloadBundler
@@ -7064,7 +7402,7 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
- CmdArgs, None));
+ CmdArgs, None, Output));
}
void OffloadBundler::ConstructJobMultipleOutputs(
@@ -7125,12 +7463,13 @@ void OffloadBundler::ConstructJobMultipleOutputs(
}
CmdArgs.push_back(TCArgs.MakeArgString(UB));
CmdArgs.push_back("-unbundle");
+ CmdArgs.push_back("-allow-missing-bundles");
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
- CmdArgs, None));
+ CmdArgs, None, Outputs));
}
void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7160,5 +7499,5 @@ void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::None(),
Args.MakeArgString(getToolChain().GetProgramPath(getShortName())),
- CmdArgs, Inputs));
+ CmdArgs, Inputs, Output));
}
diff --git a/clang/lib/Driver/ToolChains/CloudABI.cpp b/clang/lib/Driver/ToolChains/CloudABI.cpp
index 8dcfd4951bbf..3efca8776260 100644
--- a/clang/lib/Driver/ToolChains/CloudABI.cpp
+++ b/clang/lib/Driver/ToolChains/CloudABI.cpp
@@ -92,8 +92,9 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
// CloudABI - CloudABI tool chain which can call ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 1cac5a0822a4..6a95aa5ec628 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -37,6 +37,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
@@ -60,6 +61,62 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+static void renderRpassOptions(const ArgList &Args, ArgStringList &CmdArgs) {
+ if (const Arg *A = Args.getLastArg(options::OPT_Rpass_EQ))
+ CmdArgs.push_back(Args.MakeArgString(Twine("--plugin-opt=-pass-remarks=") +
+ A->getValue()));
+
+ if (const Arg *A = Args.getLastArg(options::OPT_Rpass_missed_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--plugin-opt=-pass-remarks-missed=") + A->getValue()));
+
+ if (const Arg *A = Args.getLastArg(options::OPT_Rpass_analysis_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--plugin-opt=-pass-remarks-analysis=") + A->getValue()));
+}
+
+static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ const llvm::Triple &Triple,
+ const InputInfo &Input,
+ const InputInfo &Output) {
+ StringRef Format = "yaml";
+ if (const Arg *A = Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
+ Format = A->getValue();
+
+ SmallString<128> F;
+ const Arg *A = Args.getLastArg(options::OPT_foptimization_record_file_EQ);
+ if (A)
+ F = A->getValue();
+ else if (Output.isFilename())
+ F = Output.getFilename();
+
+ assert(!F.empty() && "Cannot determine remarks output name.");
+ // Append "opt.ld.<format>" to the end of the file name.
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("--plugin-opt=opt-remarks-filename=") + F +
+ Twine(".opt.ld.") + Format));
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_foptimization_record_passes_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--plugin-opt=opt-remarks-passes=") + A->getValue()));
+
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--plugin-opt=opt-remarks-format=") + Format.data()));
+}
+
+static void renderRemarksHotnessOptions(const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (Args.hasFlag(options::OPT_fdiagnostics_show_hotness,
+ options::OPT_fno_diagnostics_show_hotness, false))
+ CmdArgs.push_back("--plugin-opt=opt-remarks-with-hotness");
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--plugin-opt=opt-remarks-hotness-threshold=") + A->getValue()));
+}
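Editor's note: the remarks file name built in renderRemarksOptions above is the record file (or the output name) with ".opt.ld.<format>" appended. A sketch of the composition; the values below are assumed examples:

  #include <iostream>
  #include <string>

  int main() {
    std::string F = "a.out";     // output name or -foptimization-record-file=
    std::string Format = "yaml"; // default unless -fsave-optimization-record=
    std::cout << "--plugin-opt=opt-remarks-filename=" + F + ".opt.ld." + Format
              << '\n';
  }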
+
void tools::addPathIfExists(const Driver &D, const Twine &Path,
ToolChain::path_list &Paths) {
if (D.getVFS().exists(Path))
@@ -214,6 +271,24 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
}
}
+void tools::addLinkerCompressDebugSectionsOption(
+ const ToolChain &TC, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ // GNU ld supports --compress-debug-sections=none|zlib|zlib-gnu|zlib-gabi,
+ // where zlib is an alias for zlib-gabi. Therefore -gz=none|zlib|zlib-gnu
+ // are translated to --compress-debug-sections=none|zlib|zlib-gnu. Bare
+ // -gz is not translated since ld's --compress-debug-sections option
+ // requires an argument.
+ if (const Arg *A = Args.getLastArg(options::OPT_gz_EQ)) {
+ StringRef V = A->getValue();
+ if (V == "none" || V == "zlib" || V == "zlib-gnu")
+ CmdArgs.push_back(Args.MakeArgString("--compress-debug-sections=" + V));
+ else
+ TC.getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << V;
+ }
+}
+
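Editor's note: the translation above is a straight value map with diagnostics for everything else. A sketch (gzToLinkerFlag is a hypothetical name; nullopt marks values the driver diagnoses):

  #include <optional>
  #include <string>

  // Translate a -gz=<value> into the GNU ld spelling as the function
  // above does.
  static std::optional<std::string> gzToLinkerFlag(const std::string &V) {
    if (V == "none" || V == "zlib" || V == "zlib-gnu")
      return "--compress-debug-sections=" + V;
    return std::nullopt;
  }

  int main() { return gzToLinkerFlag("zlib").has_value() ? 0 : 1; }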
void tools::AddTargetFeature(const ArgList &Args,
std::vector<StringRef> &Features,
OptSpecifier OnOpt, OptSpecifier OffOpt,
@@ -226,11 +301,12 @@ void tools::AddTargetFeature(const ArgList &Args,
}
}
-/// Get the (LLVM) name of the R600 gpu we are targeting.
-static std::string getR600TargetGPU(const ArgList &Args) {
+/// Get the (LLVM) name of the AMDGPU GPU we are targeting.
+static std::string getAMDGPUTargetGPU(const llvm::Triple &T,
+ const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
- const char *GPUName = A->getValue();
- return llvm::StringSwitch<const char *>(GPUName)
+ auto GPUName = getProcessorFromTargetID(T, A->getValue());
+ return llvm::StringSwitch<std::string>(GPUName)
.Cases("rv630", "rv635", "r600")
.Cases("rv610", "rv620", "rs780", "rs880")
.Case("rv740", "rv770")
@@ -238,7 +314,7 @@ static std::string getR600TargetGPU(const ArgList &Args) {
.Cases("sumo", "sumo2", "sumo")
.Case("hemlock", "cypress")
.Case("aruba", "cayman")
- .Default(GPUName);
+ .Default(GPUName.str());
}
return "";
}
@@ -313,6 +389,7 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
return "";
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le: {
std::string TargetCPUName = ppc::getPPCTargetCPU(Args);
@@ -333,6 +410,11 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
return TargetCPUName;
}
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ return A->getValue();
+ return "";
case llvm::Triple::bpfel:
case llvm::Triple::bpfeb:
@@ -341,6 +423,8 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::sparcv9:
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
return A->getValue();
+ if (T.getArch() == llvm::Triple::sparc && T.isOSSolaris())
+ return "v9";
return "";
case llvm::Triple::x86:
@@ -359,7 +443,7 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::r600:
case llvm::Triple::amdgcn:
- return getR600TargetGPU(Args);
+ return getAMDGPUTargetGPU(T, Args);
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
@@ -512,11 +596,13 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
Path));
}
- // Need this flag to turn on new pass manager via Gold plugin.
- if (Args.hasFlag(options::OPT_fexperimental_new_pass_manager,
- options::OPT_fno_experimental_new_pass_manager,
- /* Default */ ENABLE_EXPERIMENTAL_NEW_PASS_MANAGER)) {
- CmdArgs.push_back("-plugin-opt=new-pass-manager");
+ // Pass an option to enable/disable the new pass manager.
+ if (auto *A = Args.getLastArg(options::OPT_flegacy_pass_manager,
+ options::OPT_fno_legacy_pass_manager)) {
+ if (A->getOption().matches(options::OPT_flegacy_pass_manager))
+ CmdArgs.push_back("-plugin-opt=legacy-pass-manager");
+ else
+ CmdArgs.push_back("-plugin-opt=new-pass-manager");
}
// Setup statistics file output.
@@ -526,6 +612,21 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
Args.MakeArgString(Twine("-plugin-opt=stats-file=") + StatsFile));
addX86AlignBranchArgs(D, Args, CmdArgs, /*IsLTO=*/true);
+
+ // Handle the on-screen remark diagnostics options: '-Rpass-*'.
+ renderRpassOptions(Args, CmdArgs);
+
+ // Handle serialized remarks options: '-fsave-optimization-record'
+ // and '-foptimization-record-*'.
+ if (willEmitRemarks(Args))
+ renderRemarksOptions(Args, CmdArgs, ToolChain.getEffectiveTriple(), Input,
+ Output);
+
+ // Handle remarks hotness/threshold related options.
+ renderRemarksHotnessOptions(Args, CmdArgs);
+
+ addMachineOutlinerArgs(D, Args, CmdArgs, ToolChain.getEffectiveTriple(),
+ /*IsLTO=*/true);
}
void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
@@ -625,6 +726,16 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
return false;
}
+static const char *getAsNeededOption(const ToolChain &TC, bool as_needed) {
+ // While the Solaris 11.2 ld added --as-needed/--no-as-needed as aliases
+ // for the native forms -z ignore/-z record, they are missing in Illumos,
+ // so always use the native form.
+ if (TC.getTriple().isOSSolaris())
+ return as_needed ? "-zignore" : "-zrecord";
+ else
+ return as_needed ? "--as-needed" : "--no-as-needed";
+}
+
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
ArgStringList &CmdArgs) {
// Fuchsia never needs these. Any sanitizer runtimes with system
@@ -634,7 +745,7 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
// Force linking against the system libraries sanitizers depends on
// (see PR15823 why this is necessary).
- CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back(getAsNeededOption(TC, false));
// There's no libpthread or librt on RTEMS & Android.
if (TC.getTriple().getOS() != llvm::Triple::RTEMS &&
!TC.getTriple().isAndroid()) {
@@ -670,6 +781,11 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (!Args.hasArg(options::OPT_shared) && !TC.getTriple().isAndroid())
HelperStaticRuntimes.push_back("asan-preinit");
}
+ if (SanArgs.needsMemProfRt() && SanArgs.linkRuntimes()) {
+ SharedRuntimes.push_back("memprof");
+ if (!Args.hasArg(options::OPT_shared) && !TC.getTriple().isAndroid())
+ HelperStaticRuntimes.push_back("memprof-preinit");
+ }
if (SanArgs.needsUbsanRt() && SanArgs.linkRuntimes()) {
if (SanArgs.requiresMinimalRuntime())
SharedRuntimes.push_back("ubsan_minimal");
@@ -682,6 +798,8 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
else
SharedRuntimes.push_back("scudo");
}
+ if (SanArgs.needsTsanRt() && SanArgs.linkRuntimes())
+ SharedRuntimes.push_back("tsan");
if (SanArgs.needsHwasanRt() && SanArgs.linkRuntimes())
SharedRuntimes.push_back("hwasan");
}
@@ -705,6 +823,13 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
StaticRuntimes.push_back("asan_cxx");
}
+ if (!SanArgs.needsSharedRt() && SanArgs.needsMemProfRt() &&
+ SanArgs.linkRuntimes()) {
+ StaticRuntimes.push_back("memprof");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("memprof_cxx");
+ }
+
if (!SanArgs.needsSharedRt() && SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
StaticRuntimes.push_back("hwasan");
if (SanArgs.linkCXXRuntimes())
@@ -719,7 +844,8 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("msan_cxx");
}
- if (SanArgs.needsTsanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsTsanRt() &&
+ SanArgs.linkRuntimes()) {
StaticRuntimes.push_back("tsan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("tsan_cxx");
@@ -779,8 +905,18 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
!Args.hasArg(options::OPT_shared)) {
addSanitizerRuntime(TC, Args, CmdArgs, "fuzzer", false, true);
- if (!Args.hasArg(clang::driver::options::OPT_nostdlibxx))
+ if (SanArgs.needsFuzzerInterceptors())
+ addSanitizerRuntime(TC, Args, CmdArgs, "fuzzer_interceptors", false,
+ true);
+ if (!Args.hasArg(clang::driver::options::OPT_nostdlibxx)) {
+ bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
+ }
}
for (auto RT : SharedRuntimes)
@@ -828,7 +964,7 @@ bool tools::addXRayRuntime(const ToolChain&TC, const ArgList &Args, ArgStringLis
}
void tools::linkXRayRuntimeDeps(const ToolChain &TC, ArgStringList &CmdArgs) {
- CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back(getAsNeededOption(TC, false));
CmdArgs.push_back("-lpthread");
if (!TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lrt");
@@ -848,8 +984,14 @@ bool tools::areOptimizationsEnabled(const ArgList &Args) {
return false;
}
-const char *tools::SplitDebugName(const ArgList &Args, const InputInfo &Input,
+const char *tools::SplitDebugName(const JobAction &JA, const ArgList &Args,
+ const InputInfo &Input,
const InputInfo &Output) {
+ auto AddPostfix = [JA](auto &F) {
+ if (JA.getOffloadingDeviceKind() == Action::OFK_HIP)
+ F += (Twine("_") + JA.getOffloadingArch()).str();
+ F += ".dwo";
+ };
if (Arg *A = Args.getLastArg(options::OPT_gsplit_dwarf_EQ))
if (StringRef(A->getValue()) == "single")
return Args.MakeArgString(Output.getFilename());
@@ -857,14 +999,16 @@ const char *tools::SplitDebugName(const ArgList &Args, const InputInfo &Input,
Arg *FinalOutput = Args.getLastArg(options::OPT_o);
if (FinalOutput && Args.hasArg(options::OPT_c)) {
SmallString<128> T(FinalOutput->getValue());
- llvm::sys::path::replace_extension(T, "dwo");
+ llvm::sys::path::remove_filename(T);
+ llvm::sys::path::append(T, llvm::sys::path::stem(FinalOutput->getValue()));
+ AddPostfix(T);
return Args.MakeArgString(T);
} else {
// Use the compilation dir.
SmallString<128> T(
Args.getLastArgValue(options::OPT_fdebug_compilation_dir));
SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
- llvm::sys::path::replace_extension(F, "dwo");
+ AddPostfix(F);
T += F;
return Args.MakeArgString(T);
}
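A worked example of the .dwo names the revised logic produces (paths hypothetical):

  // clang -c -gsplit-dwarf -o out/foo.o foo.c       -> out/foo.dwo
  // the same compile as a HIP device job for gfx906 -> out/foo_gfx906.dwo
  // clang -c -gsplit-dwarf=single -o out/foo.o      -> out/foo.o (DWARF stays in the object)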
@@ -889,12 +1033,13 @@ void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
InputInfo II(types::TY_Object, Output.getFilename(), Output.getFilename());
// First extract the dwo sections.
- C.addCommand(std::make_unique<Command>(
- JA, T, ResponseFileSupport::AtFileCurCP(), Exec, ExtractArgs, II));
+ C.addCommand(std::make_unique<Command>(JA, T,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, ExtractArgs, II, Output));
// Then remove them from the original .o file.
C.addCommand(std::make_unique<Command>(
- JA, T, ResponseFileSupport::AtFileCurCP(), Exec, StripArgs, II));
+ JA, T, ResponseFileSupport::AtFileCurCP(), Exec, StripArgs, II, Output));
}
// Claim options we don't want to warn if they are unused. We do this for
@@ -995,8 +1140,6 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
break;
case llvm::Triple::ppc:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
case llvm::Triple::sparcv9:
IsPICLevelTwo = true; // "-fPIE"
break;
@@ -1253,7 +1396,7 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
bool AsNeeded = LGT == LibGccType::UnspecifiedLibGcc &&
!TC.getTriple().isAndroid() && !TC.getTriple().isOSCygMing();
if (AsNeeded)
- CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back(getAsNeededOption(TC, true));
switch (UNW) {
case ToolChain::UNW_None:
@@ -1281,7 +1424,7 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
}
if (AsNeeded)
- CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back(getAsNeededOption(TC, false));
}
static void AddLibgcc(const ToolChain &TC, const Driver &D,
@@ -1405,3 +1548,77 @@ void tools::addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
}
}
}
+
+unsigned tools::getOrCheckAMDGPUCodeObjectVersion(
+ const Driver &D, const llvm::opt::ArgList &Args, bool Diagnose) {
+ const unsigned MinCodeObjVer = 2;
+ const unsigned MaxCodeObjVer = 4;
+ unsigned CodeObjVer = 3;
+
+ // Emit warnings for legacy options even if they are overridden.
+ if (Diagnose) {
+ if (Args.hasArg(options::OPT_mno_code_object_v3_legacy))
+ D.Diag(diag::warn_drv_deprecated_arg) << "-mno-code-object-v3"
+ << "-mcode-object-version=2";
+
+ if (Args.hasArg(options::OPT_mcode_object_v3_legacy))
+ D.Diag(diag::warn_drv_deprecated_arg) << "-mcode-object-v3"
+ << "-mcode-object-version=3";
+ }
+
+ // The last of -mcode-object-v3, -mno-code-object-v3 and
+ // -mcode-object-version=<version> wins.
+ if (auto *CodeObjArg =
+ Args.getLastArg(options::OPT_mcode_object_v3_legacy,
+ options::OPT_mno_code_object_v3_legacy,
+ options::OPT_mcode_object_version_EQ)) {
+ if (CodeObjArg->getOption().getID() ==
+ options::OPT_mno_code_object_v3_legacy) {
+ CodeObjVer = 2;
+ } else if (CodeObjArg->getOption().getID() ==
+ options::OPT_mcode_object_v3_legacy) {
+ CodeObjVer = 3;
+ } else {
+ auto Remnant =
+ StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
+ if (Diagnose &&
+ (Remnant || CodeObjVer < MinCodeObjVer || CodeObjVer > MaxCodeObjVer))
+ D.Diag(diag::err_drv_invalid_int_value)
+ << CodeObjArg->getAsString(Args) << CodeObjArg->getValue();
+ }
+ }
+ return CodeObjVer;
+}
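A sketch of how the precedence above resolves, using the option spellings from this hunk:

  // (no flag)                                 -> 3 (the default)
  // -mno-code-object-v3                       -> 2
  // -mcode-object-v3 -mcode-object-version=2  -> 2 (the last flag wins)
  // -mcode-object-version=5                   -> err_drv_invalid_int_value when
  //                                              Diagnose is set (outside [2, 4])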
+
+void tools::addMachineOutlinerArgs(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const llvm::Triple &Triple, bool IsLTO) {
+ auto addArg = [&, IsLTO](const Twine &Arg) {
+ if (IsLTO) {
+ CmdArgs.push_back(Args.MakeArgString("-plugin-opt=" + Arg));
+ } else {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString(Arg));
+ }
+ };
+
+ if (Arg *A = Args.getLastArg(options::OPT_moutline,
+ options::OPT_mno_outline)) {
+ if (A->getOption().matches(options::OPT_moutline)) {
+ // We only support -moutline in AArch64 and ARM targets right now. If
+ // we're not compiling for these, emit a warning and ignore the flag.
+ // Otherwise, add the proper mllvm flags.
+ if (!(Triple.isARM() || Triple.isThumb() ||
+ Triple.getArch() == llvm::Triple::aarch64 ||
+ Triple.getArch() == llvm::Triple::aarch64_32)) {
+ D.Diag(diag::warn_drv_moutline_unsupported_opt) << Triple.getArchName();
+ } else {
+ addArg(Twine("-enable-machine-outliner"));
+ }
+ } else {
+ // Disable all outlining behaviour.
+ addArg(Twine("-enable-machine-outliner=never"));
+ }
+ }
+}
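For illustration, the arguments addMachineOutlinerArgs appends in each mode, derived from the addArg lambda above:

  // -moutline, AArch64/ARM, regular compile -> "-mllvm" "-enable-machine-outliner"
  // -moutline, AArch64/ARM, LTO             -> "-plugin-opt=-enable-machine-outliner"
  // -moutline, unsupported target           -> warning emitted, nothing appended
  // -mno-outline                            -> the "-enable-machine-outliner=never"
  //                                            form of the above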
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h
index 29dedec9b09c..187c340d1c3c 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -27,6 +27,10 @@ void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, const JobAction &JA);
+void addLinkerCompressDebugSectionsOption(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
void claimNoWarnArgs(const llvm::opt::ArgList &Args);
bool addSanitizerRuntimes(const ToolChain &TC, const llvm::opt::ArgList &Args,
@@ -45,7 +49,7 @@ void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
llvm::opt::ArgStringList &CmdArgs,
const llvm::opt::ArgList &Args);
-const char *SplitDebugName(const llvm::opt::ArgList &Args,
+const char *SplitDebugName(const JobAction &JA, const llvm::opt::ArgList &Args,
const InputInfo &Input, const InputInfo &Output);
void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
@@ -133,6 +137,14 @@ void addMultilibFlag(bool Enabled, const char *const Flag,
void addX86AlignBranchArgs(const Driver &D, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, bool IsLTO);
+
+unsigned getOrCheckAMDGPUCodeObjectVersion(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ bool Diagnose = false);
+
+void addMachineOutlinerArgs(const Driver &D, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const llvm::Triple &Triple, bool IsLTO);
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/clang/lib/Driver/ToolChains/CrossWindows.cpp b/clang/lib/Driver/ToolChains/CrossWindows.cpp
index 127a8a5f24cc..07abf4f83f7d 100644
--- a/clang/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/clang/lib/Driver/ToolChains/CrossWindows.cpp
@@ -58,7 +58,7 @@ void tools::CrossWindows::Assembler::ConstructJob(
Exec = Args.MakeArgString(Assembler);
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void tools::CrossWindows::Linker::ConstructJob(
@@ -203,8 +203,9 @@ void tools::CrossWindows::Linker::ConstructJob(
Exec = Args.MakeArgString(TC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
}
CrossWindowsToolChain::CrossWindowsToolChain(const Driver &D,
@@ -270,10 +271,10 @@ AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
}
void CrossWindowsToolChain::
-AddCXXStdlibLibArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- if (GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx)
- CC1Args.push_back("-lc++");
+AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ if (GetCXXStdlibType(Args) == ToolChain::CST_Libcxx)
+ CmdArgs.push_back("-lc++");
}
clang::SanitizerMask CrossWindowsToolChain::getSupportedSanitizers() const {
diff --git a/clang/lib/Driver/ToolChains/CrossWindows.h b/clang/lib/Driver/ToolChains/CrossWindows.h
index df9a7f71bf9f..ffe75332c2e8 100644
--- a/clang/lib/Driver/ToolChains/CrossWindows.h
+++ b/clang/lib/Driver/ToolChains/CrossWindows.h
@@ -11,6 +11,7 @@
#include "Cuda.h"
#include "Gnu.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -59,8 +60,9 @@ public:
bool isPIEDefault() const override;
bool isPICDefaultForced() const override;
- unsigned int GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
- return 0;
+ LangOptions::StackProtectorMode
+ GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return LangOptions::SSPOff;
}
void
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index 110a0bca9bc1..d14776c5f5ba 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -16,6 +16,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
@@ -32,30 +33,81 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+namespace {
+struct CudaVersionInfo {
+ std::string DetectedVersion;
+ CudaVersion Version;
+};
// Parses the contents of version.txt in a CUDA installation. It should
// contain one line of the form e.g. "CUDA Version 7.5.2".
-void CudaInstallationDetector::ParseCudaVersionFile(llvm::StringRef V) {
- Version = CudaVersion::UNKNOWN;
+CudaVersionInfo parseCudaVersionFile(llvm::StringRef V) {
+ V = V.trim();
if (!V.startswith("CUDA Version "))
- return;
+ return {V.str(), CudaVersion::UNKNOWN};
V = V.substr(strlen("CUDA Version "));
SmallVector<StringRef,4> VersionParts;
V.split(VersionParts, '.');
- if (VersionParts.size() < 2)
- return;
- DetectedVersion = join_items(".", VersionParts[0], VersionParts[1]);
- Version = CudaStringToVersion(DetectedVersion);
- if (Version != CudaVersion::UNKNOWN) {
- // TODO(tra): remove the warning once we have all features of 10.2 and 11.0
- // implemented.
- DetectedVersionIsNotSupported = Version > CudaVersion::LATEST_SUPPORTED;
- return;
- }
+ return {"version.txt: " + V.str() + ".",
+ VersionParts.size() < 2
+ ? CudaVersion::UNKNOWN
+ : CudaStringToVersion(
+ join_items(".", VersionParts[0], VersionParts[1]))};
+}
- Version = CudaVersion::LATEST_SUPPORTED;
- DetectedVersionIsNotSupported = true;
+CudaVersion getCudaVersion(uint32_t raw_version) {
+ if (raw_version < 7050)
+ return CudaVersion::CUDA_70;
+ if (raw_version < 8000)
+ return CudaVersion::CUDA_75;
+ if (raw_version < 9000)
+ return CudaVersion::CUDA_80;
+ if (raw_version < 9010)
+ return CudaVersion::CUDA_90;
+ if (raw_version < 9020)
+ return CudaVersion::CUDA_91;
+ if (raw_version < 10000)
+ return CudaVersion::CUDA_92;
+ if (raw_version < 10010)
+ return CudaVersion::CUDA_100;
+ if (raw_version < 10020)
+ return CudaVersion::CUDA_101;
+ if (raw_version < 11000)
+ return CudaVersion::CUDA_102;
+ if (raw_version < 11010)
+ return CudaVersion::CUDA_110;
+ return CudaVersion::LATEST;
}
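A few worked examples of the bucketing above (CUDA encodes CUDA_VERSION as 1000 * major + 10 * minor):

  // getCudaVersion(9020)  -> CUDA_92   (9020 < 10000)
  // getCudaVersion(10010) -> CUDA_101  (10010 < 10020)
  // getCudaVersion(11020) -> LATEST    (no bucket reaches 11020)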
+CudaVersionInfo parseCudaHFile(llvm::StringRef Input) {
+ // Helper lambda that consumes the given words from the start of the line
+ // and returns the remainder, or None if the line does not start with them.
+ auto StartsWithWords =
+ [](llvm::StringRef Line,
+ const SmallVector<StringRef, 3> words) -> llvm::Optional<StringRef> {
+ for (StringRef word : words) {
+ if (!Line.consume_front(word))
+ return {};
+ Line = Line.ltrim();
+ }
+ return Line;
+ };
+
+ Input = Input.ltrim();
+ while (!Input.empty()) {
+ if (auto Line =
+ StartsWithWords(Input.ltrim(), {"#", "define", "CUDA_VERSION"})) {
+ uint32_t RawVersion;
+ Line->consumeInteger(10, RawVersion);
+ return {"cuda.h: CUDA_VERSION=" + Twine(RawVersion).str() + ".",
+ getCudaVersion(RawVersion)};
+ }
+ // Find next non-empty line.
+ Input = Input.drop_front(Input.find_first_of("\n\r")).ltrim();
+ }
+ return {"cuda.h: CUDA_VERSION not found.", CudaVersion::UNKNOWN};
+}
+} // namespace
+
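A hedged example of parseCudaHFile on a representative cuda.h fragment (the input text is hypothetical):

  // Input:  "// comment\n#define CUDA_VERSION 11000\n"
  // Result: {"cuda.h: CUDA_VERSION=11000.", CudaVersion::CUDA_110}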
void CudaInstallationDetector::WarnIfUnsupportedVersion() {
if (DetectedVersionIsNotSupported)
D.Diag(diag::warn_drv_unknown_cuda_version)
@@ -152,16 +204,31 @@ CudaInstallationDetector::CudaInstallationDetector(
else
continue;
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
- FS.getBufferForFile(InstallPath + "/version.txt");
- if (!VersionFile) {
- // CUDA 7.0 doesn't have a version.txt, so guess that's our version if
- // version.txt isn't present.
- Version = CudaVersion::CUDA_70;
- } else {
- ParseCudaVersionFile((*VersionFile)->getBuffer());
+ CudaVersionInfo VersionInfo = {"", CudaVersion::UNKNOWN};
+ if (auto VersionFile = FS.getBufferForFile(InstallPath + "/version.txt"))
+ VersionInfo = parseCudaVersionFile((*VersionFile)->getBuffer());
+ // If version file didn't give us the version, try to find it in cuda.h
+ if (VersionInfo.Version == CudaVersion::UNKNOWN)
+ if (auto CudaHFile = FS.getBufferForFile(InstallPath + "/include/cuda.h"))
+ VersionInfo = parseCudaHFile((*CudaHFile)->getBuffer());
+ // As a last resort, make an educated guess between CUDA-7.0 (which had
+ // no version.txt file and old-style libdevice bitcode) and an unknown
+ // recent CUDA version (no version.txt, new-style bitcode).
+ if (VersionInfo.Version == CudaVersion::UNKNOWN) {
+ VersionInfo.Version = FS.exists(LibDevicePath + "/libdevice.10.bc")
+ ? CudaVersion::LATEST
+ : CudaVersion::CUDA_70;
+ VersionInfo.DetectedVersion =
+ "No version found in version.txt or cuda.h.";
}
+ Version = VersionInfo.Version;
+ DetectedVersion = VersionInfo.DetectedVersion;
+
+ // TODO(tra): remove the warning once we have all features of 10.2
+ // and 11.0 implemented.
+ DetectedVersionIsNotSupported = Version > CudaVersion::LATEST_SUPPORTED;
+
if (Version >= CudaVersion::CUDA_90) {
// CUDA-9+ uses single libdevice file for all GPU variants.
std::string FilePath = LibDevicePath + "/libdevice.10.bc";
@@ -259,13 +326,13 @@ void CudaInstallationDetector::AddCudaIncludeArgs(
void CudaInstallationDetector::CheckCudaVersionSupportsArch(
CudaArch Arch) const {
if (Arch == CudaArch::UNKNOWN || Version == CudaVersion::UNKNOWN ||
- ArchsWithBadVersion.count(Arch) > 0)
+ ArchsWithBadVersion[(int)Arch])
return;
auto MinVersion = MinVersionForCudaArch(Arch);
auto MaxVersion = MaxVersionForCudaArch(Arch);
if (Version < MinVersion || Version > MaxVersion) {
- ArchsWithBadVersion.insert(Arch);
+ ArchsWithBadVersion[(int)Arch] = true;
D.Diag(diag::err_drv_cuda_version_unsupported)
<< CudaArchToString(Arch) << CudaVersionToString(MinVersion)
<< CudaVersionToString(MaxVersion) << InstallPath
@@ -317,7 +384,7 @@ static DeviceDebugInfoLevel mustEmitDebugInfo(const ArgList &Args) {
}
return IsDebugEnabled ? EmitSameDebugInfoAsHost : DebugDirectivesOnly;
}
- return DisableDebugInfo;
+ return willEmitRemarks(Args) ? DebugDirectivesOnly : DisableDebugInfo;
}
void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -427,7 +494,7 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
JA, *this,
ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
"--options-file"},
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
@@ -496,7 +563,7 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
JA, *this,
ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
"--options-file"},
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -533,11 +600,6 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-arch");
CmdArgs.push_back(Args.MakeArgString(GPUArch));
- // Assume that the directory specified with --libomptarget_nvptx_path
- // contains the static library libomptarget-nvptx.a.
- if (const Arg *A = Args.getLastArg(options::OPT_libomptarget_nvptx_path_EQ))
- CmdArgs.push_back(Args.MakeArgString(Twine("-L") + A->getValue()));
-
// Add paths specified in LIBRARY_PATH environment variable as -L options.
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
@@ -547,9 +609,6 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
llvm::sys::path::append(DefaultLibPath, "lib" CLANG_LIBDIR_SUFFIX);
CmdArgs.push_back(Args.MakeArgString(Twine("-L") + DefaultLibPath));
- // Add linking against library implementing OpenMP calls on NVPTX target.
- CmdArgs.push_back("-lomptarget-nvptx");
-
for (const auto &II : Inputs) {
if (II.getType() == types::TY_LLVM_IR ||
II.getType() == types::TY_LTO_IR ||
@@ -577,7 +636,7 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
JA, *this,
ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
"--options-file"},
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
@@ -653,33 +712,30 @@ void CudaToolChain::addClangTargetOptions(
CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+ std::string CudaVersionStr;
+
// New CUDA versions often introduce new instructions that are only supported
// by new PTX version, so we need to raise PTX level to enable them in NVPTX
// back-end.
const char *PtxFeature = nullptr;
switch (CudaInstallation.version()) {
- case CudaVersion::CUDA_110:
- PtxFeature = "+ptx70";
- break;
- case CudaVersion::CUDA_102:
- PtxFeature = "+ptx65";
- break;
- case CudaVersion::CUDA_101:
- PtxFeature = "+ptx64";
- break;
- case CudaVersion::CUDA_100:
- PtxFeature = "+ptx63";
- break;
- case CudaVersion::CUDA_92:
- PtxFeature = "+ptx61";
- break;
- case CudaVersion::CUDA_91:
- PtxFeature = "+ptx61";
- break;
- case CudaVersion::CUDA_90:
- PtxFeature = "+ptx60";
+#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER) \
+ case CudaVersion::CUDA_##CUDA_VER: \
+ CudaVersionStr = #CUDA_VER; \
+ PtxFeature = "+ptx" #PTX_VER; \
break;
+ CASE_CUDA_VERSION(110, 70);
+ CASE_CUDA_VERSION(102, 65);
+ CASE_CUDA_VERSION(101, 64);
+ CASE_CUDA_VERSION(100, 63);
+ CASE_CUDA_VERSION(92, 61);
+ CASE_CUDA_VERSION(91, 61);
+ CASE_CUDA_VERSION(90, 60);
+#undef CASE_CUDA_VERSION
default:
+ // If the CUDA version is unknown, treat it as CUDA 8.0. The same
+ // assumption is made in libomptarget/deviceRTLs.
+ CudaVersionStr = "80";
PtxFeature = "+ptx42";
}
CC1Args.append({"-target-feature", PtxFeature});
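For reference, one expansion of the helper macro: CASE_CUDA_VERSION(110, 70) produces

  // case CudaVersion::CUDA_110:
  //   CudaVersionStr = "110";
  //   PtxFeature = "+ptx70";
  //   break;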
@@ -694,9 +750,6 @@ void CudaToolChain::addClangTargetOptions(
if (DeviceOffloadingKind == Action::OFK_OpenMP) {
SmallVector<StringRef, 8> LibraryPaths;
- if (const Arg *A = DriverArgs.getLastArg(options::OPT_libomptarget_nvptx_path_EQ))
- LibraryPaths.push_back(A->getValue());
-
// Add user defined library paths from LIBRARY_PATH.
llvm::Optional<std::string> LibPath =
llvm::sys::Process::GetEnv("LIBRARY_PATH");
@@ -714,22 +767,38 @@ void CudaToolChain::addClangTargetOptions(
llvm::sys::path::append(DefaultLibPath, Twine("lib") + CLANG_LIBDIR_SUFFIX);
LibraryPaths.emplace_back(DefaultLibPath.c_str());
- std::string LibOmpTargetName =
- "libomptarget-nvptx-" + GpuArch.str() + ".bc";
- bool FoundBCLibrary = false;
- for (StringRef LibraryPath : LibraryPaths) {
- SmallString<128> LibOmpTargetFile(LibraryPath);
- llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
- if (llvm::sys::fs::exists(LibOmpTargetFile)) {
+ // First check whether user specifies bc library
+ if (const Arg *A =
+ DriverArgs.getLastArg(options::OPT_libomptarget_nvptx_bc_path_EQ)) {
+ std::string LibOmpTargetName(A->getValue());
+ if (llvm::sys::fs::exists(LibOmpTargetName)) {
CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetFile));
- FoundBCLibrary = true;
- break;
+ CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetName));
+ } else {
+ getDriver().Diag(diag::err_drv_omp_offload_target_bcruntime_not_found)
+ << LibOmpTargetName;
+ }
+ } else {
+ bool FoundBCLibrary = false;
+
+ std::string LibOmpTargetName = "libomptarget-nvptx-cuda_" +
+ CudaVersionStr + "-" + GpuArch.str() +
+ ".bc";
+
+ for (StringRef LibraryPath : LibraryPaths) {
+ SmallString<128> LibOmpTargetFile(LibraryPath);
+ llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
+ if (llvm::sys::fs::exists(LibOmpTargetFile)) {
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetFile));
+ FoundBCLibrary = true;
+ break;
+ }
}
+ if (!FoundBCLibrary)
+ getDriver().Diag(diag::err_drv_omp_offload_target_missingbcruntime)
+ << LibOmpTargetName;
}
- if (!FoundBCLibrary)
- getDriver().Diag(diag::warn_drv_omp_offload_target_missingbcruntime)
- << LibOmpTargetName;
}
}
diff --git a/clang/lib/Driver/ToolChains/Cuda.h b/clang/lib/Driver/ToolChains/Cuda.h
index 873eb7338a30..6ae4415a563a 100644
--- a/clang/lib/Driver/ToolChains/Cuda.h
+++ b/clang/lib/Driver/ToolChains/Cuda.h
@@ -15,9 +15,9 @@
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/VersionTuple.h"
+#include <bitset>
#include <set>
#include <vector>
@@ -41,7 +41,7 @@ private:
// CUDA architectures for which we have raised an error in
// CheckCudaVersionSupportsArch.
- mutable llvm::SmallSet<CudaArch, 4> ArchsWithBadVersion;
+ mutable std::bitset<(int)CudaArch::LAST> ArchsWithBadVersion;
public:
CudaInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
@@ -78,9 +78,6 @@ public:
return LibDeviceMap.lookup(Gpu);
}
void WarnIfUnsupportedVersion();
-
-private:
- void ParseCudaVersionFile(llvm::StringRef V);
};
namespace tools {
@@ -188,6 +185,8 @@ public:
const llvm::opt::ArgList &Args) const override;
unsigned GetDefaultDwarfVersion() const override { return 2; }
+ // NVPTX supports only DWARF2.
+ unsigned getMaxDwarfVersion() const override { return 2; }
const ToolChain &HostTC;
CudaInstallationDetector CudaInstallation;
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index 7b879f8cb652..eb7bd4aec898 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "Darwin.h"
+#include "Arch/AArch64.h"
#include "Arch/ARM.h"
#include "CommonArgs.h"
#include "clang/Basic/AlignedAllocation.h"
@@ -58,7 +59,7 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
.Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
.Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
.Cases("armv7s", "xscale", llvm::Triple::arm)
- .Case("arm64", llvm::Triple::aarch64)
+ .Cases("arm64", "arm64e", llvm::Triple::aarch64)
.Case("arm64_32", llvm::Triple::aarch64_32)
.Case("r600", llvm::Triple::r600)
.Case("amdgcn", llvm::Triple::amdgcn)
@@ -74,7 +75,7 @@ void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(Str);
T.setArch(Arch);
- if (Str == "x86_64h")
+ if (Str == "x86_64h" || Str == "arm64e")
T.setArchName(Str);
else if (ArchKind == llvm::ARM::ArchKind::ARMV6M ||
ArchKind == llvm::ARM::ArchKind::ARMV7M ||
@@ -149,7 +150,7 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void darwin::MachOTool::anchor() {}
@@ -204,15 +205,19 @@ static bool shouldLinkerNotDedup(bool IsLinkerOnlyAction, const ArgList &Args) {
void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfoList &Inputs,
- unsigned Version[5]) const {
+ unsigned Version[5], bool LinkerIsLLD,
+ bool LinkerIsLLDDarwinNew) const {
const Driver &D = getToolChain().getDriver();
const toolchains::MachO &MachOTC = getMachOToolChain();
// Newer linkers support -demangle. Pass it if supported and not disabled by
// the user.
- if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ if ((Version[0] >= 100 || LinkerIsLLD) &&
+ !Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("-demangle");
+ // FIXME: Pass most of the flags below that check Version if LinkerIsLLD too.
+
if (Args.hasArg(options::OPT_rdynamic) && Version[0] >= 137)
CmdArgs.push_back("-export_dynamic");
@@ -249,7 +254,9 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
// Since this is passed unconditionally, ld64 will never look for libLTO.dylib
// next to it. That's ok since ld64 using a libLTO.dylib not matching the
// clang version won't work anyways.
- if (Version[0] >= 133) {
+ // lld is built at the same revision as clang and statically links in
+ // LLVM libraries, so it doesn't need libLTO.dylib.
+ if (Version[0] >= 133 && !LinkerIsLLD) {
// Search for libLTO in <InstalledDir>/../lib/libLTO.dylib
StringRef P = llvm::sys::path::parent_path(D.Dir);
SmallString<128> LibLTOPath(P);
@@ -332,7 +339,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_init);
// Add the deployment target.
- if (Version[0] >= 520)
+ if (Version[0] >= 520 || LinkerIsLLDDarwinNew)
MachOTC.addPlatformVersionArgs(Args, CmdArgs);
else
MachOTC.addMinVersionArgs(Args, CmdArgs);
@@ -522,7 +529,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetProgramPath("touch"));
CmdArgs.push_back(Output.getFilename());
C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, None));
+ JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, None, Output));
return;
}
@@ -533,9 +540,14 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args);
}
+ bool LinkerIsLLD, LinkerIsLLDDarwinNew;
+ const char *Exec = Args.MakeArgString(
+ getToolChain().GetLinkerPath(&LinkerIsLLD, &LinkerIsLLDDarwinNew));
+
// I'm not sure why this particular decomposition exists in gcc, but
// we follow suit for ease of comparison.
- AddLinkArgs(C, Args, CmdArgs, Inputs, Version);
+ AddLinkArgs(C, Args, CmdArgs, Inputs, Version, LinkerIsLLD,
+ LinkerIsLLDDarwinNew);
if (willEmitRemarks(Args) &&
checkRemarksOptions(getToolChain().getDriver(), Args,
@@ -686,16 +698,20 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- ResponseFileSupport ResponseSupport = ResponseFileSupport::AtFileUTF8();
- if (Version[0] < 607) {
+ ResponseFileSupport ResponseSupport;
+ if (LinkerIsLLDDarwinNew) {
+ // Xcode12's ld64 added support for @response files, but it's crashy:
+ // https://openradar.appspot.com/radar?id=4933317065441280
+ // FIXME: Pass this for ld64 once it no longer crashes.
+ ResponseSupport = ResponseFileSupport::AtFileUTF8();
+ } else {
// For older versions of the linker, use the legacy filelist method instead.
ResponseSupport = {ResponseFileSupport::RF_FileList, llvm::sys::WEM_UTF8,
"-filelist"};
}
- const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
std::unique_ptr<Command> Cmd = std::make_unique<Command>(
- JA, *this, ResponseSupport, Exec, CmdArgs, Inputs);
+ JA, *this, ResponseSupport, Exec, CmdArgs, Inputs, Output);
Cmd->setInputFileList(std::move(InputFileList));
C.addCommand(std::move(Cmd));
}
@@ -720,7 +736,7 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
@@ -741,7 +757,7 @@ void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
@@ -765,7 +781,7 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
@@ -884,8 +900,11 @@ StringRef MachO::getMachOArchName(const ArgList &Args) const {
case llvm::Triple::aarch64_32:
return "arm64_32";
- case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64: {
+ if (getTriple().isArm64e())
+ return "arm64e";
return "arm64";
+ }
case llvm::Triple::thumb:
case llvm::Triple::arm:
@@ -996,6 +1015,9 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
return;
if (isTargetAppleSiliconMac())
return;
+ // ARC runtime is supported everywhere on arm64e.
+ if (getTriple().isArm64e())
+ return;
ObjCRuntime runtime = getDefaultObjCRuntime(/*nonfragile*/ true);
@@ -1062,10 +1084,9 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
DarwinLibName += Component;
if (!(Opts & RLO_IsEmbedded))
DarwinLibName += "_";
- DarwinLibName += getOSLibraryNameSuffix();
- } else
- DarwinLibName += getOSLibraryNameSuffix(true);
+ }
+ DarwinLibName += getOSLibraryNameSuffix();
DarwinLibName += IsShared ? "_dynamic.dylib" : ".a";
SmallString<128> Dir(getDriver().ResourceDir);
llvm::sys::path::append(
@@ -1196,8 +1217,8 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
// runtime's functionality.
if (hasExportSymbolDirective(Args)) {
if (ForGCOV) {
- addExportedSymbol(CmdArgs, "___gcov_flush");
- addExportedSymbol(CmdArgs, "_flush_fn_list");
+ addExportedSymbol(CmdArgs, "___gcov_dump");
+ addExportedSymbol(CmdArgs, "___gcov_reset");
addExportedSymbol(CmdArgs, "_writeout_fn_list");
addExportedSymbol(CmdArgs, "_reset_fn_list");
} else {
@@ -1697,7 +1718,7 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
llvm::Triple::OSType OSTy = llvm::Triple::UnknownOS;
StringRef MachOArchName = Toolchain.getMachOArchName(Args);
- if (MachOArchName == "arm64") {
+ if (MachOArchName == "arm64" || MachOArchName == "arm64e") {
#if __arm64__
// A clang running on an Apple Silicon mac defaults
// to building for mac when building for arm64 rather than
@@ -2021,21 +2042,42 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
- // On Darwin, libc++ is installed alongside the compiler in
- // include/c++/v1, so get from '<install>/bin' to '<install>/include/c++/v1'.
- {
- llvm::SmallString<128> P = llvm::StringRef(getDriver().getInstalledDir());
- // Note that P can be relative, so we have to '..' and not parent_path.
- llvm::sys::path::append(P, "..", "include", "c++", "v1");
- addSystemInclude(DriverArgs, CC1Args, P);
+ // On Darwin, libc++ can be installed in one of the following two places:
+ // 1. Alongside the compiler in <install>/include/c++/v1
+ // 2. In an SDK (or a custom sysroot) in <sysroot>/usr/include/c++/v1
+ //
+ // The precedence of paths is as listed above, i.e. we take the first path
+ // that exists. Also note that we never include libc++ twice -- we take the
+ // first path that exists and don't send the other paths to CC1 (otherwise
+ // include_next could break).
+
+ // Check for (1)
+ // Get from '<install>/bin' to '<install>/include/c++/v1'.
+ // Note that InstallBin can be relative, so we use '..' instead of
+ // parent_path.
+ llvm::SmallString<128> InstallBin =
+ llvm::StringRef(getDriver().getInstalledDir()); // <install>/bin
+ llvm::sys::path::append(InstallBin, "..", "include", "c++", "v1");
+ if (getVFS().exists(InstallBin)) {
+ addSystemInclude(DriverArgs, CC1Args, InstallBin);
+ return;
+ } else if (DriverArgs.hasArg(options::OPT_v)) {
+ llvm::errs() << "ignoring nonexistent directory \"" << InstallBin
+ << "\"\n";
}
- // Also add <sysroot>/usr/include/c++/v1 unless -nostdinc is used,
- // to match the legacy behavior in CC1.
- if (!DriverArgs.hasArg(options::OPT_nostdinc)) {
- llvm::SmallString<128> P = Sysroot;
- llvm::sys::path::append(P, "usr", "include", "c++", "v1");
- addSystemInclude(DriverArgs, CC1Args, P);
+
+ // Otherwise, check for (2)
+ llvm::SmallString<128> SysrootUsr = Sysroot;
+ llvm::sys::path::append(SysrootUsr, "usr", "include", "c++", "v1");
+ if (getVFS().exists(SysrootUsr)) {
+ addSystemInclude(DriverArgs, CC1Args, SysrootUsr);
+ return;
+ } else if (DriverArgs.hasArg(options::OPT_v)) {
+ llvm::errs() << "ignoring nonexistent directory \"" << SysrootUsr
+ << "\"\n";
}
+
+ // Otherwise, don't add any path.
break;
}
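In short, the resulting search order is (first existing directory wins, and -v reports any skipped candidate):

  // 1. <install>/bin/../include/c++/v1  (toolchain-relative)
  // 2. <sysroot>/usr/include/c++/v1     (SDK or custom sysroot)
  // otherwise no libc++ include path is added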
@@ -2271,11 +2313,6 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
}
}
- if (getTriple().isX86())
- if (!Args.hasArgNoClaim(options::OPT_mtune_EQ))
- DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mtune_EQ),
- "core2");
-
// Add the arch options based on the particular spelling of -arch, to match
// how the driver driver works.
if (!BoundArch.empty()) {
@@ -2413,6 +2450,13 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
// Enable compatibility mode for NSItemProviderCompletionHandler in
// Foundation/NSItemProvider.h.
CC1Args.push_back("-fcompatibility-qualified-id-block-type-checking");
+
+ // Give static local variables in inline functions hidden visibility when
+ // -fvisibility-inlines-hidden is enabled.
+ if (!DriverArgs.getLastArgNoClaim(
+ options::OPT_fvisibility_inlines_hidden_static_local_var,
+ options::OPT_fno_visibility_inlines_hidden_static_local_var))
+ CC1Args.push_back("-fvisibility-inlines-hidden-static-local-var");
}
DerivedArgList *
@@ -2713,6 +2757,7 @@ void Darwin::CheckObjCARC() const {
SanitizerMask Darwin::getSupportedSanitizers() const {
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
+ const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
@@ -2730,9 +2775,8 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
&& !(isTargetIPhoneOS() && isIPhoneOSVersionLT(5, 0)))
Res |= SanitizerKind::Vptr;
- if (isTargetMacOS()) {
- if (IsX86_64)
- Res |= SanitizerKind::Thread;
+ if ((IsX86_64 || IsAArch64) && isTargetMacOS()) {
+ Res |= SanitizerKind::Thread;
} else if (isTargetIOSSimulator() || isTargetTvOSSimulator()) {
if (IsX86_64)
Res |= SanitizerKind::Thread;
diff --git a/clang/lib/Driver/ToolChains/Darwin.h b/clang/lib/Driver/ToolChains/Darwin.h
index 64c252efea7d..25c63fed922a 100644
--- a/clang/lib/Driver/ToolChains/Darwin.h
+++ b/clang/lib/Driver/ToolChains/Darwin.h
@@ -11,6 +11,7 @@
#include "Cuda.h"
#include "ROCm.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Driver/DarwinSDKInfo.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -62,7 +63,8 @@ class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
bool NeedsTempPath(const InputInfoList &Inputs) const;
void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- const InputInfoList &Inputs, unsigned Version[5]) const;
+ const InputInfoList &Inputs, unsigned Version[5],
+ bool LinkerIsLLD, bool LinkerIsLLDDarwinNew) const;
public:
Linker(const ToolChain &TC) : MachOTool("darwin::Linker", "linker", TC) {}
@@ -436,7 +438,11 @@ public:
bool isMacosxVersionLT(unsigned V0, unsigned V1 = 0, unsigned V2 = 0) const {
assert(isTargetMacOS() && getTriple().isMacOSX() &&
"Unexpected call for non OS X target!");
- VersionTuple MinVers = getTriple().getMinimumSupportedOSVersion();
+ // The effective triple might not be initialized yet, so construct a
+ // pseudo-effective triple to get the minimum supported OS version.
+ VersionTuple MinVers =
+ llvm::Triple(getTriple().getArchName(), "apple", "macos")
+ .getMinimumSupportedOSVersion();
return (!MinVers.empty() && MinVers > TargetVersion
? MinVers
: TargetVersion) < VersionTuple(V0, V1, V2);
@@ -487,17 +493,18 @@ public:
return !(isTargetMacOS() && isMacosxVersionLT(10, 6));
}
- unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ LangOptions::StackProtectorMode
+ GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
// Stack protectors default to on for user code on 10.5,
// and for everything in 10.6 and beyond
if (isTargetIOSBased() || isTargetWatchOSBased())
- return 1;
+ return LangOptions::SSPOn;
else if (isTargetMacOS() && !isMacosxVersionLT(10, 6))
- return 1;
+ return LangOptions::SSPOn;
else if (isTargetMacOS() && !isMacosxVersionLT(10, 5) && !KernelOrKext)
- return 1;
+ return LangOptions::SSPOn;
- return 0;
+ return LangOptions::SSPOff;
}
void CheckObjCARC() const override;
diff --git a/clang/lib/Driver/ToolChains/DragonFly.cpp b/clang/lib/Driver/ToolChains/DragonFly.cpp
index 88dd0c899d8a..9568b47e89e6 100644
--- a/clang/lib/Driver/ToolChains/DragonFly.cpp
+++ b/clang/lib/Driver/ToolChains/DragonFly.cpp
@@ -45,8 +45,9 @@ void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -119,11 +120,11 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- CmdArgs.push_back("-L/usr/lib/gcc50");
+ CmdArgs.push_back("-L/usr/lib/gcc80");
if (!Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-rpath");
- CmdArgs.push_back("/usr/lib/gcc50");
+ CmdArgs.push_back("/usr/lib/gcc80");
}
if (D.CCCIsCXX()) {
@@ -170,8 +171,9 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
@@ -187,7 +189,7 @@ DragonFly::DragonFly(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(getDriver().Dir + "/../lib");
getFilePaths().push_back("/usr/lib");
- getFilePaths().push_back("/usr/lib/gcc50");
+ getFilePaths().push_back("/usr/lib/gcc80");
}
Tool *DragonFly::buildAssembler() const {
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index 80f6db7ea642..669d911de18a 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -19,22 +19,36 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+void Flang::AddPreprocessingOptions(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ Args.AddAllArgs(CmdArgs, {options::OPT_D, options::OPT_U, options::OPT_I});
+}
+
void Flang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args, const char *LinkingOutput) const {
const auto &TC = getToolChain();
- const llvm::Triple &Triple = TC.getEffectiveTriple();
- const std::string &TripleStr = Triple.getTriple();
+ // TODO: Once code-generation is available, these lines will need to be
+ // uncommented.
+ // const llvm::Triple &Triple = TC.getEffectiveTriple();
+ // const std::string &TripleStr = Triple.getTriple();
ArgStringList CmdArgs;
+ // Invoke ourselves in -fc1 mode.
CmdArgs.push_back("-fc1");
- CmdArgs.push_back("-triple");
- CmdArgs.push_back(Args.MakeArgString(TripleStr));
+ // TODO: Once code-generation is available, these lines will need to be
+ // uncommented.
+ // Add the "effective" target triple.
+ // CmdArgs.push_back("-triple");
+ // CmdArgs.push_back(Args.MakeArgString(TripleStr));
if (isa<PreprocessJobAction>(JA)) {
- CmdArgs.push_back("-E");
+ if (C.getArgs().hasArg(options::OPT_test_io))
+ CmdArgs.push_back("-test-io");
+ else
+ CmdArgs.push_back("-E");
} else if (isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) {
if (JA.getType() == types::TY_Nothing) {
CmdArgs.push_back("-fsyntax-only");
@@ -57,6 +71,14 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
assert(false && "Unexpected action class for Flang tool.");
}
+ const InputInfo &Input = Inputs[0];
+ types::ID InputType = Input.getType();
+
+ // Add preprocessing options like -I, -D, etc. if we are using the
+ // preprocessor (i.e. skip when dealing with e.g. binary files).
+ if (types::getPreprocessedType(InputType) != types::TY_INVALID)
+ AddPreprocessingOptions(Args, CmdArgs);
+
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -64,16 +86,18 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- const InputInfo &Input = Inputs[0];
assert(Input.isFilename() && "Invalid input.");
CmdArgs.push_back(Input.getFilename());
const auto& D = C.getDriver();
- const char* Exec = Args.MakeArgString(D.GetProgramPath("flang", TC));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ // TODO: Replace flang-new with flang once the new driver replaces the
+ // throwaway driver
+ const char *Exec = Args.MakeArgString(D.GetProgramPath("flang-new", TC));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
}
-Flang::Flang(const ToolChain &TC) : Tool("flang", "flang frontend", TC) {}
+Flang::Flang(const ToolChain &TC) : Tool("flang-new", "flang frontend", TC) {}
Flang::~Flang() {}
diff --git a/clang/lib/Driver/ToolChains/Flang.h b/clang/lib/Driver/ToolChains/Flang.h
index 19e3a8c28f7e..83b79505e0a9 100644
--- a/clang/lib/Driver/ToolChains/Flang.h
+++ b/clang/lib/Driver/ToolChains/Flang.h
@@ -23,6 +23,15 @@ namespace tools {
/// Flang compiler tool.
class LLVM_LIBRARY_VISIBILITY Flang : public Tool {
+private:
+ /// Extract preprocessing options from the driver arguments and add them to
+ /// the preprocessor command arguments.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddPreprocessingOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
public:
Flang(const ToolChain &TC);
~Flang() override;
diff --git a/clang/lib/Driver/ToolChains/FreeBSD.cpp b/clang/lib/Driver/ToolChains/FreeBSD.cpp
index 909ac5e99212..4524d9b8a85c 100644
--- a/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -42,6 +42,7 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--32");
break;
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
CmdArgs.push_back("-a32");
break;
case llvm::Triple::mips:
@@ -128,8 +129,9 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -191,6 +193,11 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-m");
CmdArgs.push_back("elf32ppc_fbsd");
break;
+ case llvm::Triple::ppcle:
+ CmdArgs.push_back("-m");
+ // Use generic -- only usage is for freestanding.
+ CmdArgs.push_back("elf32lppc");
+ break;
case llvm::Triple::mips:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf32btsmip_fbsd");
@@ -359,8 +366,9 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
@@ -372,7 +380,7 @@ FreeBSD::FreeBSD(const Driver &D, const llvm::Triple &Triple,
// When targeting 32-bit platforms, look for '/usr/lib32/crt1.o' and fall
// back to '/usr/lib' if it doesn't exist.
if ((Triple.getArch() == llvm::Triple::x86 || Triple.isMIPS32() ||
- Triple.getArch() == llvm::Triple::ppc) &&
+ Triple.isPPC32()) &&
D.getVFS().exists(getDriver().SysRoot + "/usr/lib32/crt1.o"))
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib32");
else
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.cpp b/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 94e025e3055a..8e086010a984 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -95,6 +95,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Dyld = D.DyldPrefix;
if (SanArgs.needsAsanRt() && SanArgs.needsSharedRt())
Dyld += "asan/";
+ if (SanArgs.needsTsanRt() && SanArgs.needsSharedRt())
+ Dyld += "tsan/";
Dyld += "ld.so.1";
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back(Args.MakeArgString(Dyld));
@@ -165,7 +167,7 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
/// Fuchsia - Fuchsia tool chain which can call as(1) and ld(1) directly.
@@ -208,6 +210,23 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
.flag("+fsanitize=address")
.flag("-fexceptions")
.flag("+fno-exceptions"));
+ // Use the relative vtables ABI.
+ // TODO: Remove these multilibs once relative vtables are enabled by default
+ // for Fuchsia.
+ Multilibs.push_back(Multilib("relative-vtables", {}, {}, 4)
+ .flag("+fexperimental-relative-c++-abi-vtables"));
+ Multilibs.push_back(Multilib("relative-vtables+noexcept", {}, {}, 5)
+ .flag("+fexperimental-relative-c++-abi-vtables")
+ .flag("-fexceptions")
+ .flag("+fno-exceptions"));
+ Multilibs.push_back(Multilib("relative-vtables+asan", {}, {}, 6)
+ .flag("+fexperimental-relative-c++-abi-vtables")
+ .flag("+fsanitize=address"));
+ Multilibs.push_back(Multilib("relative-vtables+asan+noexcept", {}, {}, 7)
+ .flag("+fexperimental-relative-c++-abi-vtables")
+ .flag("+fsanitize=address")
+ .flag("-fexceptions")
+ .flag("+fno-exceptions"));
Multilibs.FilterOut([&](const Multilib &M) {
std::vector<std::string> RD = FilePaths(M);
return std::all_of(RD.begin(), RD.end(), [&](std::string P) {
@@ -220,6 +239,13 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, true),
"fexceptions", Flags);
addMultilibFlag(getSanitizerArgs().needsAsanRt(), "fsanitize=address", Flags);
+
+ addMultilibFlag(
+ Args.hasFlag(options::OPT_fexperimental_relative_cxx_abi_vtables,
+ options::OPT_fno_experimental_relative_cxx_abi_vtables,
+ /*default=*/false),
+ "fexperimental-relative-c++-abi-vtables", Flags);
+
Multilibs.setFilePathsCallback(FilePaths);
if (Multilibs.select(Flags, SelectedMultilib))
@@ -349,6 +375,7 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const {
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
+ Res |= SanitizerKind::Thread;
return Res;
}
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.h b/clang/lib/Driver/ToolChains/Fuchsia.h
index 3159a54bda06..07adf9b7101d 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.h
+++ b/clang/lib/Driver/ToolChains/Fuchsia.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_FUCHSIA_H
#include "Gnu.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -59,8 +60,9 @@ public:
return llvm::DebuggerKind::GDB;
}
- unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
- return 2; // SSPStrong
+ LangOptions::StackProtectorMode
+ GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return LangOptions::SSPStrong;
}
std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp
index c8a7fce07ef1..1d8a3cdce92a 100644
--- a/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -38,10 +38,10 @@ using tools::addMultilibFlag;
using tools::addPathIfExists;
static bool forwardToGCC(const Option &O) {
- // Don't forward inputs from the original command line. They are added from
- // InputInfoList.
- return O.getKind() != Option::InputClass &&
- !O.hasFlag(options::DriverOption) && !O.hasFlag(options::LinkerInput);
+ // LinkerInput options have been forwarded. Don't duplicate.
+ if (O.hasFlag(options::LinkerInput))
+ return false;
+ return O.matches(options::OPT_Link_Group) || O.hasFlag(options::LinkOption);
}
// Switch CPU names not recognized by GNU assembler to a close CPU that it does
@@ -76,23 +76,6 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
// to get to the assembler.
A->claim();
- // Don't forward any -g arguments to assembly steps.
- if (isa<AssembleJobAction>(JA) &&
- A->getOption().matches(options::OPT_g_Group))
- continue;
-
- // Don't forward any -W arguments to assembly and link steps.
- if ((isa<AssembleJobAction>(JA) || isa<LinkJobAction>(JA)) &&
- A->getOption().matches(options::OPT_W_Group))
- continue;
-
- // Don't forward -mno-unaligned-access since GCC doesn't understand
- // it and because it doesn't affect the assembly or link steps.
- if ((isa<AssembleJobAction>(JA) || isa<LinkJobAction>(JA)) &&
- (A->getOption().matches(options::OPT_munaligned_access) ||
- A->getOption().matches(options::OPT_mno_unaligned_access)))
- continue;
-
A->render(Args, CmdArgs);
}
}
@@ -116,6 +99,7 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
break;
case llvm::Triple::x86:
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
CmdArgs.push_back("-m32");
break;
case llvm::Triple::x86_64:
@@ -188,8 +172,9 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
GCCName = "gcc";
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void tools::gcc::Preprocessor::RenderExtraToolArgs(
@@ -270,7 +255,13 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::thumbeb:
return isArmBigEndian(T, Args) ? "armelfb_linux_eabi" : "armelf_linux_eabi";
case llvm::Triple::ppc:
- return "elf32ppclinux";
+ if (T.isOSLinux())
+ return "elf32ppclinux";
+ return "elf32ppc";
+ case llvm::Triple::ppcle:
+ if (T.isOSLinux())
+ return "elf32lppclinux";
+ return "elf32lppc";
case llvm::Triple::ppc64:
return "elf64ppc";
case llvm::Triple::ppc64le:
@@ -358,12 +349,17 @@ void tools::gnutools::StaticLibTool::ConstructJob(
// Silence warnings when linking C code with a C++ '-stdlib' argument.
Args.ClaimAllArgs(options::OPT_stdlib_EQ);
- // GNU ar tool command "ar <options> <output_file> <input_files>".
+ // ar tool command "llvm-ar <options> <output_file> <input_files>".
ArgStringList CmdArgs;
// Create and insert file members with a deterministic index.
CmdArgs.push_back("rcsD");
CmdArgs.push_back(Output.getFilename());
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ for (const auto &II : Inputs) {
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ }
+ }
// Delete old output archive file if it already exists before generating a new
// archive file.
@@ -376,8 +372,9 @@ void tools::gnutools::StaticLibTool::ConstructJob(
}
const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -568,6 +565,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
+ addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
// The profile runtime also needs access to system libraries.
getToolChain().addProfileRTLibs(Args, CmdArgs);
@@ -673,8 +671,9 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_T);
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void tools::gnutools::Assembler::ConstructJob(Compilation &C,
@@ -732,13 +731,23 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
case llvm::Triple::ppc: {
CmdArgs.push_back("-a32");
CmdArgs.push_back("-mppc");
+ CmdArgs.push_back("-mbig-endian");
CmdArgs.push_back(
ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
break;
}
+ case llvm::Triple::ppcle: {
+ CmdArgs.push_back("-a32");
+ CmdArgs.push_back("-mppc");
+ CmdArgs.push_back("-mlittle-endian");
+ CmdArgs.push_back(
+ ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
+ break;
+ }
case llvm::Triple::ppc64: {
CmdArgs.push_back("-a64");
CmdArgs.push_back("-mppc64");
+ CmdArgs.push_back("-mbig-endian");
CmdArgs.push_back(
ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
break;
@@ -941,8 +950,9 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath(DefaultAssembler));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
// Handle the debug info splitting at object creation time if we're
// creating an object.
@@ -950,7 +960,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
if (Args.hasArg(options::OPT_gsplit_dwarf) &&
getToolChain().getTriple().isOSLinux())
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Inputs[0], Output));
+ SplitDebugName(JA, Args, Inputs[0], Output));
}
namespace {
@@ -1552,15 +1562,21 @@ static bool findMSP430Multilibs(const Driver &D,
StringRef Path, const ArgList &Args,
DetectedMultilibs &Result) {
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
- Multilib MSP430Multilib = makeMultilib("/430");
+ Multilib WithoutExceptions = makeMultilib("/430").flag("-exceptions");
+ Multilib WithExceptions = makeMultilib("/430/exceptions").flag("+exceptions");
+
// FIXME: when clang starts to support the msp430x ISA, additional logic
// to select between multilibs must be implemented.
// Multilib MSP430xMultilib = makeMultilib("/large");
- Result.Multilibs.push_back(MSP430Multilib);
+ Result.Multilibs.push_back(WithoutExceptions);
+ Result.Multilibs.push_back(WithExceptions);
Result.Multilibs.FilterOut(NonExistent);
Multilib::flags_list Flags;
+ addMultilibFlag(Args.hasFlag(options::OPT_fexceptions,
+ options::OPT_fno_exceptions, false),
+ "exceptions", Flags);
if (Result.Multilibs.select(Flags, Result.SelectedMultilib))
return true;
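A minimal sketch of the selection mechanics, assuming the helper semantics implied above (Multilibs stands in for the populated MultilibSet):

  // Each multilib advertises "+exceptions" or "-exceptions"; addMultilibFlag
  // records which polarity the command line asked for, and select() picks
  // the multilib whose flag list is consistent with that request.
  Multilib::flags_list Flags;
  addMultilibFlag(/*Enabled=*/false, "exceptions", Flags); // no -fexceptions
  Multilib Selected;
  if (Multilibs.select(Flags, Selected))
    llvm::outs() << Selected.gccSuffix() << "\n"; // "/430", not "/430/exceptions"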
@@ -1944,27 +1960,36 @@ void Generic_GCC::GCCInstallationDetector::init(
// installation available. GCC installs are ranked by version number.
Version = GCCVersion::Parse("0.0.0");
for (const std::string &Prefix : Prefixes) {
- if (!D.getVFS().exists(Prefix))
+ auto &VFS = D.getVFS();
+ if (!VFS.exists(Prefix))
continue;
for (StringRef Suffix : CandidateLibDirs) {
const std::string LibDir = Prefix + Suffix.str();
- if (!D.getVFS().exists(LibDir))
+ if (!VFS.exists(LibDir))
continue;
+ // Probe <libdir>/gcc and <libdir>/gcc-cross once; reuse the results below.
+ bool GCCDirExists = VFS.exists(LibDir + "/gcc");
+ bool GCCCrossDirExists = VFS.exists(LibDir + "/gcc-cross");
// Try to match the exact target triple first.
- ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, TargetTriple.str());
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, TargetTriple.str(),
+ false, GCCDirExists, GCCCrossDirExists);
// Try rest of possible triples.
for (StringRef Candidate : ExtraTripleAliases) // Try these first.
- ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate, false,
+ GCCDirExists, GCCCrossDirExists);
for (StringRef Candidate : CandidateTripleAliases)
- ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate, false,
+ GCCDirExists, GCCCrossDirExists);
}
for (StringRef Suffix : CandidateBiarchLibDirs) {
const std::string LibDir = Prefix + Suffix.str();
- if (!D.getVFS().exists(LibDir))
+ if (!VFS.exists(LibDir))
continue;
+ bool GCCDirExists = VFS.exists(LibDir + "/gcc");
+ bool GCCCrossDirExists = VFS.exists(LibDir + "/gcc-cross");
for (StringRef Candidate : CandidateBiarchTripleAliases)
- ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate,
- /*NeedsBiarchSuffix=*/ true);
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate, true,
+ GCCDirExists, GCCCrossDirExists);
}
}
}
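The purpose of the new GCCDirExists/GCCCrossDirExists parameters: the two directory probes happen once per lib dir and are shared across every candidate triple, and inside ScanLibDirForGCCTriple they gate the corresponding suffix entries (see the Suffixes hunk further down), so gcc/ and gcc-cross/ trees that do not exist are never scanned per triple. Condensed from the hunk above:

  bool GCCDirExists = VFS.exists(LibDir + "/gcc");
  bool GCCCrossDirExists = VFS.exists(LibDir + "/gcc-cross");
  for (StringRef Candidate : CandidateTripleAliases)
    ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate,
                           /*NeedsBiarchSuffix=*/false, GCCDirExists,
                           GCCCrossDirExists);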
@@ -2026,6 +2051,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// Non-Solaris is much simpler - most systems just go with "/usr".
if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
// Yet, still look for RHEL devtoolsets.
+ Prefixes.push_back("/opt/rh/devtoolset-10/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-9/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-8/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
@@ -2125,6 +2151,11 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// On 32-bit PowerPC systems running SUSE Linux, gcc is configured as a
// 64-bit compiler which defaults to "-m32", hence "powerpc64-suse-linux".
"powerpc64-suse-linux", "powerpc-montavista-linuxspe"};
+ static const char *const PPCLELibDirs[] = {"/lib32", "/lib"};
+ static const char *const PPCLETriples[] = {"powerpcle-linux-gnu",
+ "powerpcle-unknown-linux-gnu",
+ "powerpcle-linux-musl"};
+
static const char *const PPC64LibDirs[] = {"/lib64", "/lib"};
static const char *const PPC64Triples[] = {
"powerpc64-linux-gnu", "powerpc64-unknown-linux-gnu",
@@ -2132,7 +2163,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const PPC64LELibDirs[] = {"/lib64", "/lib"};
static const char *const PPC64LETriples[] = {
"powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
- "powerpc64le-suse-linux", "ppc64le-redhat-linux"};
+ "powerpc64le-none-linux-gnu", "powerpc64le-suse-linux",
+ "ppc64le-redhat-linux"};
static const char *const RISCV32LibDirs[] = {"/lib32", "/lib"};
static const char *const RISCV32Triples[] = {"riscv32-unknown-linux-gnu",
@@ -2365,6 +2397,12 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
BiarchLibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
BiarchTripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
break;
+ case llvm::Triple::ppcle:
+ LibDirs.append(begin(PPCLELibDirs), end(PPCLELibDirs));
+ TripleAliases.append(begin(PPCLETriples), end(PPCLETriples));
+ BiarchLibDirs.append(begin(PPC64LELibDirs), end(PPC64LELibDirs));
+ BiarchTripleAliases.append(begin(PPC64LETriples), end(PPC64LETriples));
+ break;
case llvm::Triple::ppc64:
LibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
TripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
@@ -2374,6 +2412,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
case llvm::Triple::ppc64le:
LibDirs.append(begin(PPC64LELibDirs), end(PPC64LELibDirs));
TripleAliases.append(begin(PPC64LETriples), end(PPC64LETriples));
+ BiarchLibDirs.append(begin(PPCLELibDirs), end(PPCLELibDirs));
+ BiarchTripleAliases.append(begin(PPCLETriples), end(PPCLETriples));
break;
case llvm::Triple::riscv32:
LibDirs.append(begin(RISCV32LibDirs), end(RISCV32LibDirs));
@@ -2455,7 +2495,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const llvm::Triple &TargetTriple, const ArgList &Args,
const std::string &LibDir, StringRef CandidateTriple,
- bool NeedsBiarchSuffix) {
+ bool NeedsBiarchSuffix, bool GCCDirExists, bool GCCCrossDirExists) {
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
// Locations relative to the system lib directory where GCC's triple-specific
// directories might reside.
@@ -2469,11 +2509,10 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
bool Active;
} Suffixes[] = {
// This is the normal place.
- {"gcc/" + CandidateTriple.str(), "../..", true},
+ {"gcc/" + CandidateTriple.str(), "../..", GCCDirExists},
// Debian puts cross-compilers in gcc-cross.
- {"gcc-cross/" + CandidateTriple.str(), "../..",
- TargetTriple.getOS() != llvm::Triple::Solaris},
+ {"gcc-cross/" + CandidateTriple.str(), "../..", GCCCrossDirExists},
// The Freescale PPC SDK has the gcc libraries in
// <sysroot>/usr/lib/<triple>/x.y.z so have a look there as well. Only do
@@ -2540,6 +2579,9 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooConfigs(
const llvm::Triple &TargetTriple, const ArgList &Args,
const SmallVectorImpl<StringRef> &CandidateTriples,
const SmallVectorImpl<StringRef> &CandidateBiarchTriples) {
+ if (!D.getVFS().exists(D.SysRoot + GentooConfigDir))
+ return false;
+
for (StringRef CandidateTriple : CandidateTriples) {
if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple))
return true;
@@ -2556,7 +2598,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
const llvm::Triple &TargetTriple, const ArgList &Args,
StringRef CandidateTriple, bool NeedsBiarchSuffix) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
- D.getVFS().getBufferForFile(D.SysRoot + "/etc/env.d/gcc/config-" +
+ D.getVFS().getBufferForFile(D.SysRoot + GentooConfigDir + "/config-" +
CandidateTriple.str());
if (File) {
SmallVector<StringRef, 2> Lines;
@@ -2568,7 +2610,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
continue;
// Process the config file pointed to by CURRENT.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ConfigFile =
- D.getVFS().getBufferForFile(D.SysRoot + "/etc/env.d/gcc/" +
+ D.getVFS().getBufferForFile(D.SysRoot + GentooConfigDir + "/" +
Line.str());
std::pair<StringRef, StringRef> ActiveVersion = Line.rsplit('-');
// List of paths to scan for libraries.
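The Gentoo layout, sketched with illustrative values: with an empty sysroot the detector now bails out early unless /etc/env.d/gcc exists, then reads a per-triple config such as /etc/env.d/gcc/config-x86_64-pc-linux-gnu containing a line like

  CURRENT=x86_64-pc-linux-gnu-10.2.0

which names the active per-version file in the same directory; rsplit('-') then separates the triple from the version. The GentooConfigDir constant introduced in Gnu.h keeps the existence check and both getBufferForFile calls pointing at the same path.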
@@ -2662,7 +2704,17 @@ void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
}
bool Generic_GCC::IsUnwindTablesDefault(const ArgList &Args) const {
- return getArch() == llvm::Triple::x86_64;
+ switch (getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::x86_64:
+ return true;
+ default:
+ return false;
+ }
}
bool Generic_GCC::isPICDefault() const {
@@ -2697,6 +2749,7 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
case llvm::Triple::riscv32:
diff --git a/clang/lib/Driver/ToolChains/Gnu.h b/clang/lib/Driver/ToolChains/Gnu.h
index 52690ab4b83c..90d3bafc1f00 100644
--- a/clang/lib/Driver/ToolChains/Gnu.h
+++ b/clang/lib/Driver/ToolChains/Gnu.h
@@ -212,6 +212,9 @@ public:
/// The set of multilibs that the detected installation supports.
MultilibSet Multilibs;
+ // Gentoo-specific toolchain configurations are stored here.
+ const std::string GentooConfigDir = "/etc/env.d/gcc";
+
public:
explicit GCCInstallationDetector(const Driver &D) : IsValid(false), D(D) {}
void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args,
@@ -267,7 +270,8 @@ public:
const llvm::opt::ArgList &Args,
const std::string &LibDir,
StringRef CandidateTriple,
- bool NeedsBiarchSuffix = false);
+ bool NeedsBiarchSuffix, bool GCCDirExists,
+ bool GCCCrossDirExists);
bool ScanGentooConfigs(const llvm::Triple &TargetTriple,
const llvm::opt::ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/HIP.cpp b/clang/lib/Driver/ToolChains/HIP.cpp
index 7d17f809690e..a84c0c257033 100644
--- a/clang/lib/Driver/ToolChains/HIP.cpp
+++ b/clang/lib/Driver/ToolChains/HIP.cpp
@@ -11,10 +11,12 @@
#include "CommonArgs.h"
#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
+#include "clang/Basic/TargetID.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/Support/Alignment.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetParser.h"
@@ -32,6 +34,7 @@ using namespace llvm::opt;
#endif
namespace {
+const unsigned HIPCodeObjectAlign = 4096;
static void addBCLib(const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs, ArgStringList LibraryPaths,
@@ -68,7 +71,7 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
// Extract all the -m options
std::vector<llvm::StringRef> Features;
- amdgpu::getAMDGPUTargetFeatures(D, Args, Features);
+ amdgpu::getAMDGPUTargetFeatures(D, TC.getTriple(), Args, Features);
// Add features to mattr such as cumode
std::string MAttrString = "-plugin-opt=-mattr=";
@@ -88,12 +91,14 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
if (C.getDriver().isSaveTempsEnabled())
LldArgs.push_back("-save-temps");
+ addLinkerCompressDebugSectionsOption(TC, Args, LldArgs);
+
LldArgs.append({"-o", Output.getFilename()});
for (auto Input : Inputs)
LldArgs.push_back(Input.getFilename());
const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Lld, LldArgs, Inputs));
+ Lld, LldArgs, Inputs, Output));
}
// Construct a clang-offload-bundler command to bundle code objects for
@@ -105,29 +110,39 @@ void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
// for different GPU archs.
ArgStringList BundlerArgs;
BundlerArgs.push_back(Args.MakeArgString("-type=o"));
+ BundlerArgs.push_back(
+ Args.MakeArgString("-bundle-align=" + Twine(HIPCodeObjectAlign)));
// ToDo: Remove the dummy host binary entry which is required by
// clang-offload-bundler.
std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
std::string BundlerInputArg = "-inputs=" NULL_FILE;
+ // TODO: Change the bundle ID as requested by HIP runtime.
+ // For code object version 2 and 3, the offload kind in bundle ID is 'hip'
+ // for backward compatibility. For code object version 4 and greater, the
+ // offload kind in bundle ID is 'hipv4'.
+ std::string OffloadKind = "hip";
for (const auto &II : Inputs) {
const auto* A = II.getAction();
- BundlerTargetArg = BundlerTargetArg + ",hip-amdgcn-amd-amdhsa-" +
+ BundlerTargetArg = BundlerTargetArg + "," + OffloadKind +
+ "-amdgcn-amd-amdhsa--" +
StringRef(A->getOffloadingArch()).str();
BundlerInputArg = BundlerInputArg + "," + II.getFilename();
}
BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
- auto BundlerOutputArg = Args.MakeArgString(
- std::string("-outputs=").append(std::string(OutputFileName)));
+ std::string Output = std::string(OutputFileName);
+ auto BundlerOutputArg =
+ Args.MakeArgString(std::string("-outputs=").append(Output));
BundlerArgs.push_back(BundlerOutputArg);
const char *Bundler = Args.MakeArgString(
T.getToolChain().GetProgramPath("clang-offload-bundler"));
- C.addCommand(std::make_unique<Command>(JA, T, ResponseFileSupport::None(),
- Bundler, BundlerArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::None(), Bundler, BundlerArgs, Inputs,
+ InputInfo(&JA, Args.MakeArgString(Output))));
}
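Putting the pieces together, a sketch of the bundler invocation this now produces, with a hypothetical GPU arch and file names on a Linux host (where NULL_FILE is /dev/null):

  clang-offload-bundler -type=o -bundle-align=4096 \
    -targets=host-x86_64-unknown-linux,hip-amdgcn-amd-amdhsa--gfx906 \
    -inputs=/dev/null,a-gfx906.o -outputs=a.hipfb

The extra '-' before the arch reflects an empty environment component in the bundle entry's device triple, and the explicit 4096-byte bundle alignment matches HIPCodeObjectAlign.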
/// Add Generated HIP Object File which has device images embedded into the
@@ -169,10 +184,10 @@ void AMDGCN::Linker::constructGenerateObjFileFromHIPFatBinary(
ObjStream << "# HIP Object Generator\n";
ObjStream << "# *** Automatically generated by Clang ***\n";
ObjStream << " .type __hip_fatbin,@object\n";
- ObjStream << " .section .hip_fatbin,\"aMS\",@progbits,1\n";
- ObjStream << " .data\n";
+ ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
ObjStream << " .globl __hip_fatbin\n";
- ObjStream << " .p2align 3\n";
+ ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
+ << "\n";
ObjStream << "__hip_fatbin:\n";
ObjStream << " .incbin \"" << BundleFile << "\"\n";
ObjStream.flush();
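The generated assembly now looks like the following, assuming a bundle file named a.hipfb; the mergeable-strings section ("aMS",@progbits,1) and the stray .data directive are gone in favor of a plain allocatable section whose alignment matches HIPCodeObjectAlign (Log2(4096) = 12):

  .type __hip_fatbin,@object
  .section .hip_fatbin,"a",@progbits
  .globl __hip_fatbin
  .p2align 12
  __hip_fatbin:
  .incbin "a.hipfb"

A mergeable section leaves the linker free to fold and repack contents, which could defeat the code-object alignment the HIP runtime expects.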
@@ -197,7 +212,7 @@ void AMDGCN::Linker::constructGenerateObjFileFromHIPFatBinary(
McinFile, "--filetype=obj"};
const char *Mc = Args.MakeArgString(TC.GetProgramPath("llvm-mc"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Mc, McArgs, Inputs));
+ Mc, McArgs, Inputs, Output));
}
// For amdgcn the inputs of the linker job are device bitcode and output is
@@ -232,7 +247,7 @@ void HIPToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
- StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ StringRef GpuArch = getGPUArch(DriverArgs);
assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
(void) GpuArch;
assert(DeviceOffloadingKind == Action::OFK_HIP &&
@@ -260,10 +275,6 @@ void HIPToolChain::addClangTargetOptions(
CC1Args.push_back(DriverArgs.MakeArgStringRef(ArgStr));
}
- if (DriverArgs.hasFlag(options::OPT_fgpu_allow_device_init,
- options::OPT_fno_gpu_allow_device_init, false))
- CC1Args.push_back("-fgpu-allow-device-init");
-
CC1Args.push_back("-fcuda-allow-variadic-functions");
// Default to "hidden" visibility, as object level linking will not be
@@ -322,6 +333,17 @@ void HIPToolChain::addClangTargetOptions(
RocmInstallation.addCommonBitcodeLibCC1Args(
DriverArgs, CC1Args, LibDeviceFile, Wave64, DAZ, FiniteOnly,
UnsafeMathOpt, FastRelaxedMath, CorrectSqrt);
+
+ // Add instrument lib.
+ auto InstLib =
+ DriverArgs.getLastArgValue(options::OPT_gpu_instrument_lib_EQ);
+ if (InstLib.empty())
+ return;
+ if (llvm::sys::fs::exists(InstLib)) {
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(InstLib));
+ } else
+ getDriver().Diag(diag::err_drv_no_such_file) << InstLib;
}
}
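Usage sketch, assuming the option is spelled --gpu-instrument-lib= (as the OPT_gpu_instrument_lib_EQ name suggests) and a hypothetical bitcode path:

  clang -x hip a.hip --offload-arch=gfx906 \
    --gpu-instrument-lib=/opt/instr/instrument.bc

The bitcode is linked into each device compile via -mlink-builtin-bitcode, and a nonexistent path is a hard driver error rather than being silently dropped.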
@@ -337,12 +359,14 @@ HIPToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
const OptTable &Opts = getDriver().getOpts();
for (Arg *A : Args) {
- DAL->append(A);
+ if (!shouldSkipArgument(A))
+ DAL->append(A);
}
if (!BoundArch.empty()) {
DAL->eraseArg(options::OPT_mcpu_EQ);
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mcpu_EQ), BoundArch);
+ checkTargetID(*DAL);
}
return DAL;
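The TargetID plumbing lets the bound arch carry feature settings. Illustrative, assuming the ROCm target-ID syntax:

  // --offload-arch=gfx908:xnack+ splits into the processor "gfx908" and the
  // feature setting "xnack+"; checkTargetID() diagnoses a malformed ID on
  // the translated arg list, and getGPUArch() replaces the raw -mcpu lookup
  // so the feature suffix is handled consistently.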
diff --git a/clang/lib/Driver/ToolChains/Hexagon.cpp b/clang/lib/Driver/ToolChains/Hexagon.cpp
index 775f6e1094fa..fb54f73bcd4c 100644
--- a/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -189,8 +189,9 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
}
auto *Exec = Args.MakeArgString(HTC.GetProgramPath(AsName));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void hexagon::Linker::RenderExtraToolArgs(const JobAction &JA,
@@ -407,8 +408,9 @@ void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
LinkingOutput);
const char *Exec = Args.MakeArgString(HTC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
// Hexagon tools end.
diff --git a/clang/lib/Driver/ToolChains/InterfaceStubs.cpp b/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
index f7c11421e809..57acf338df5c 100644
--- a/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
+++ b/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
@@ -56,7 +56,7 @@ void Merger::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Args.MakeArgString(Merger), CmdArgs,
- Inputs));
+ Inputs, Output));
}
} // namespace ifstool
} // namespace tools
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index 180350476c38..e17a6bd4bdd2 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -142,6 +142,10 @@ std::string Linux::getMultiarchTriple(const Driver &D,
if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnu"))
return "powerpc-linux-gnu";
break;
+ case llvm::Triple::ppcle:
+ if (D.getVFS().exists(SysRoot + "/lib/powerpcle-linux-gnu"))
+ return "powerpcle-linux-gnu";
+ break;
case llvm::Triple::ppc64:
if (D.getVFS().exists(SysRoot + "/lib/powerpc64-linux-gnu"))
return "powerpc64-linux-gnu";
@@ -185,17 +189,17 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
return Triple.isArch32Bit() ? "lib" : "lib64";
}
- // It happens that only x86 and PPC use the 'lib32' variant of oslibdir, and
- // using that variant while targeting other architectures causes problems
- // because the libraries are laid out in shared system roots that can't cope
- // with a 'lib32' library search path being considered. So we only enable
- // them when we know we may need it.
+ // It happens that only x86, PPC and SPARC use the 'lib32' variant of
+ // oslibdir, and using that variant while targeting other architectures causes
+ // problems because the libraries are laid out in shared system roots that
+ // can't cope with a 'lib32' library search path being considered. So we only
+ // enable them when we know we may need it.
//
// FIXME: This is a bit of a hack. We should really unify this code for
// reasoning about oslibdir spellings with the lib dir spellings in the
// GCCInstallationDetector, but that is a more significant refactoring.
- if (Triple.getArch() == llvm::Triple::x86 ||
- Triple.getArch() == llvm::Triple::ppc)
+ if (Triple.getArch() == llvm::Triple::x86 || Triple.isPPC32() ||
+ Triple.getArch() == llvm::Triple::sparc)
return "lib32";
if (Triple.getArch() == llvm::Triple::x86_64 &&
@@ -232,6 +236,15 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("relro");
}
+ if (Triple.isAndroid() && Triple.isAndroidVersionLT(29)) {
+ // https://github.com/android/ndk/issues/1196
+ // The unwinder used by the crash handler on versions of Android prior to
+ // API 29 did not correctly handle binaries built with rosegment, which is
+ // enabled by default for LLD. Android only supports LLD, so it's not an
+ // issue that this flag is not accepted by other linkers.
+ ExtraOpts.push_back("--no-rosegment");
+ }
+
// Android ARM/AArch64 use max-page-size=4096 to reduce VMA usage. Note, lld
// from 11 onwards default max-page-size to 65536 for both ARM and AArch64.
if ((Triple.isARM() || Triple.isAArch64()) && Triple.isAndroid()) {
@@ -495,6 +508,10 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
LibDir = "lib";
Loader = "ld.so.1";
break;
+ case llvm::Triple::ppcle:
+ LibDir = "lib";
+ Loader = "ld.so.1";
+ break;
case llvm::Triple::ppc64:
LibDir = "lib64";
Loader =
@@ -642,6 +659,8 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
const StringRef PPCMultiarchIncludeDirs[] = {
"/usr/include/powerpc-linux-gnu",
"/usr/include/powerpc-linux-gnuspe"};
+ const StringRef PPCLEMultiarchIncludeDirs[] = {
+ "/usr/include/powerpcle-linux-gnu"};
const StringRef PPC64MultiarchIncludeDirs[] = {
"/usr/include/powerpc64-linux-gnu"};
const StringRef PPC64LEMultiarchIncludeDirs[] = {
@@ -715,6 +734,9 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
case llvm::Triple::ppc:
MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
break;
+ case llvm::Triple::ppcle:
+ MultiarchIncludeDirs = PPCLEMultiarchIncludeDirs;
+ break;
case llvm::Triple::ppc64:
MultiarchIncludeDirs = PPC64MultiarchIncludeDirs;
break;
diff --git a/clang/lib/Driver/ToolChains/MSP430.cpp b/clang/lib/Driver/ToolChains/MSP430.cpp
index b0bc2e014b48..f3ed9967a81a 100644
--- a/clang/lib/Driver/ToolChains/MSP430.cpp
+++ b/clang/lib/Driver/ToolChains/MSP430.cpp
@@ -128,7 +128,7 @@ MSP430ToolChain::MSP430ToolChain(const Driver &D, const llvm::Triple &Triple,
}
SmallString<128> SysRootDir(computeSysRoot());
- llvm::sys::path::append(SysRootDir, "lib", MultilibSuf);
+ llvm::sys::path::append(SysRootDir, "msp430-elf", "lib", MultilibSuf);
addPathIfExists(D, SysRootDir, getFilePaths());
}
@@ -138,10 +138,9 @@ std::string MSP430ToolChain::computeSysRoot() const {
SmallString<128> Dir;
if (GCCInstallation.isValid())
- llvm::sys::path::append(Dir, GCCInstallation.getParentLibPath(), "..",
- GCCInstallation.getTriple().str());
+ llvm::sys::path::append(Dir, GCCInstallation.getParentLibPath(), "..");
else
- llvm::sys::path::append(Dir, getDriver().Dir, "..", getTriple().str());
+ llvm::sys::path::append(Dir, getDriver().Dir, "..");
return std::string(Dir.str());
}
@@ -153,7 +152,7 @@ void MSP430ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
SmallString<128> Dir(computeSysRoot());
- llvm::sys::path::append(Dir, "include");
+ llvm::sys::path::append(Dir, "msp430-elf", "include");
addSystemInclude(DriverArgs, CC1Args, Dir.str());
}
@@ -180,6 +179,87 @@ Tool *MSP430ToolChain::buildLinker() const {
return new tools::msp430::Linker(*this);
}
+void msp430::Linker::AddStartFiles(bool UseExceptions, const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const ToolChain &ToolChain = getToolChain();
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
+ const char *crtbegin = UseExceptions ? "crtbegin.o" : "crtbegin_no_eh.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+}
+
+void msp430::Linker::AddDefaultLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ const ToolChain &ToolChain = getToolChain();
+ const Driver &D = ToolChain.getDriver();
+
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back(Args.MakeArgString(getHWMultLib(Args)));
+ CmdArgs.push_back("-lc");
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+ CmdArgs.push_back("-lcrt");
+
+ if (Args.hasArg(options::OPT_msim)) {
+ CmdArgs.push_back("-lsim");
+
+ // msp430-sim.ld relies on __crt0_call_exit being implicitly .refsym-ed
+ // in main() by msp430-gcc.
+ // This workaround should work seamlessly unless the compilation unit that
+ // contains main() is compiled by clang and then handed to the GCC
+ // compiler driver for linking.
+ CmdArgs.push_back("--undefined=__crt0_call_exit");
+ } else
+ CmdArgs.push_back("-lnosys");
+
+ CmdArgs.push_back("--end-group");
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+}
+
+void msp430::Linker::AddEndFiles(bool UseExceptions, const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const ToolChain &ToolChain = getToolChain();
+ const Driver &D = ToolChain.getDriver();
+
+ const char *crtend = UseExceptions ? "crtend.o" : "crtend_no_eh.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+}
+
+static void AddSspArgs(const ArgList &Args, ArgStringList &CmdArgs) {
+ Arg *SspFlag = Args.getLastArg(
+ options::OPT_fno_stack_protector, options::OPT_fstack_protector,
+ options::OPT_fstack_protector_all, options::OPT_fstack_protector_strong);
+
+ if (SspFlag &&
+ !SspFlag->getOption().matches(options::OPT_fno_stack_protector)) {
+ CmdArgs.push_back("-lssp_nonshared");
+ CmdArgs.push_back("-lssp");
+ }
+}
+
+static void AddImplicitLinkerScript(const std::string SysRoot,
+ const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (Args.hasArg(options::OPT_T))
+ return;
+
+ if (Args.hasArg(options::OPT_msim)) {
+ CmdArgs.push_back("-Tmsp430-sim.ld");
+ return;
+ }
+
+ const Arg *MCUArg = Args.getLastArg(options::OPT_mmcu_EQ);
+ if (!MCUArg)
+ return;
+
+ SmallString<128> MCULinkerScriptPath(SysRoot);
+ llvm::sys::path::append(MCULinkerScriptPath, "include");
+ // -L because <mcu>.ld INCLUDEs <mcu>_symbols.ld
+ CmdArgs.push_back(Args.MakeArgString("-L" + MCULinkerScriptPath));
+ CmdArgs.push_back(
+ Args.MakeArgString("-T" + StringRef(MCUArg->getValue()) + ".ld"));
+}
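A worked example with a hypothetical sysroot and MCU: given --sysroot=/opt/msp430 and -mmcu=msp430g2553, and neither -T nor -msim on the command line, the helper above appends

  -L/opt/msp430/include -Tmsp430g2553.ld

where the -L matters because msp430g2553.ld INCLUDEs msp430g2553_symbols.ld, which the linker resolves through its search path.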
+
void msp430::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -189,45 +269,50 @@ void msp430::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const Driver &D = ToolChain.getDriver();
std::string Linker = ToolChain.GetProgramPath(getShortName());
ArgStringList CmdArgs;
-
- if (!D.SysRoot.empty())
- CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+ bool UseExceptions = Args.hasFlag(options::OPT_fexceptions,
+ options::OPT_fno_exceptions, false);
+ bool UseStartAndEndFiles = !Args.hasArg(options::OPT_nostdlib, options::OPT_r,
+ options::OPT_nostartfiles);
+
+ if (Args.hasArg(options::OPT_mrelax))
+ CmdArgs.push_back("--relax");
+ if (!Args.hasArg(options::OPT_r, options::OPT_g_Group))
+ CmdArgs.push_back("--gc-sections");
+
+ Args.AddAllArgs(CmdArgs, {
+ options::OPT_e,
+ options::OPT_n,
+ options::OPT_s,
+ options::OPT_t,
+ options::OPT_u,
+ });
+
+ if (UseStartAndEndFiles)
+ AddStartFiles(UseExceptions, Args, CmdArgs);
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
-
- if (!Args.hasArg(options::OPT_T)) {
- if (const Arg *MCUArg = Args.getLastArg(options::OPT_mmcu_EQ))
- CmdArgs.push_back(
- Args.MakeArgString("-T" + StringRef(MCUArg->getValue()) + ".ld"));
- } else {
- Args.AddAllArgs(CmdArgs, options::OPT_T);
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
- }
-
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
- CmdArgs.push_back("--start-group");
- CmdArgs.push_back(Args.MakeArgString(getHWMultLib(Args)));
- CmdArgs.push_back("-lgcc");
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lcrt");
- CmdArgs.push_back("-lnosys");
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_r,
+ options::OPT_nodefaultlibs)) {
+ AddSspArgs(Args, CmdArgs);
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+ if (!Args.hasArg(options::OPT_nolibc)) {
+ AddDefaultLibs(Args, CmdArgs);
+ AddImplicitLinkerScript(D.SysRoot, Args, CmdArgs);
+ }
}
- CmdArgs.push_back("--end-group");
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
- }
+ if (UseStartAndEndFiles)
+ AddEndFiles(UseExceptions, Args, CmdArgs);
+
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(
- std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
- Args.MakeArgString(Linker), CmdArgs, Inputs));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_T);
+
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker),
+ CmdArgs, Inputs, Output));
}
diff --git a/clang/lib/Driver/ToolChains/MSP430.h b/clang/lib/Driver/ToolChains/MSP430.h
index 58fd158cd12f..3789e7442a23 100644
--- a/clang/lib/Driver/ToolChains/MSP430.h
+++ b/clang/lib/Driver/ToolChains/MSP430.h
@@ -40,6 +40,11 @@ public:
bool isPIEDefault() const override { return false; }
bool isPICDefaultForced() const override { return true; }
+ UnwindLibType
+ GetUnwindLibType(const llvm::opt::ArgList &Args) const override {
+ return UNW_None;
+ }
+
protected:
Tool *buildLinker() const override;
@@ -61,6 +66,14 @@ public:
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
+
+private:
+ void AddStartFiles(bool UseExceptions, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddDefaultLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddEndFiles(bool UseExceptions, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
};
void getMSP430TargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/MSVC.cpp b/clang/lib/Driver/ToolChains/MSVC.cpp
index 6b3c00e2ab6d..f4b7a57e0bb7 100644
--- a/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -66,6 +66,20 @@ using namespace llvm::opt;
static bool getSystemRegistryString(const char *keyPath, const char *valueName,
std::string &value, std::string *phValue);
+// Check command-line arguments to try to find a toolchain.
+static bool
+findVCToolChainViaCommandLine(const ArgList &Args, std::string &Path,
+ MSVCToolChain::ToolsetLayout &VSLayout) {
+ // Don't validate the input; trust the value supplied by the user.
+ // The primary motivation is to prevent unnecessary file and registry access.
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir)) {
+ Path = A->getValue();
+ VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
+ return true;
+ }
+ return false;
+}
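Usage sketch with a hypothetical path:

  clang-cl /vctoolsdir "C:/MSVC/14.28.29333" /c a.cpp

The value is trusted as-is and treated as a VS2017-or-newer layout; per the detection chain below, the environment, setup-config, and registry probes are then skipped, and (per the MSVCToolChain change further down) %INCLUDE% is ignored as well.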
+
// Check various environment variables to try to find a toolchain.
static bool findVCToolChainViaEnvironment(std::string &Path,
MSVCToolChain::ToolsetLayout &VSLayout) {
@@ -319,8 +333,10 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-out:") + Output.getFilename()));
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
- !C.getDriver().IsCLMode())
+ !C.getDriver().IsCLMode()) {
CmdArgs.push_back("-defaultlib:libcmt");
+ CmdArgs.push_back("-defaultlib:oldnames");
+ }
if (!llvm::sys::Process::GetEnv("LIB")) {
// If the VC environment hasn't been configured (perhaps because the user
@@ -366,8 +382,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-nologo");
- if (Args.hasArg(options::OPT_g_Group, options::OPT__SLASH_Z7,
- options::OPT__SLASH_Zd))
+ if (Args.hasArg(options::OPT_g_Group, options::OPT__SLASH_Z7))
CmdArgs.push_back("-debug");
// Pass on /Brepro if it was passed to the compiler.
@@ -592,9 +607,9 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
linkPath = TC.GetProgramPath(Linker.str().c_str());
}
- auto LinkCmd =
- std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileUTF16(),
- Args.MakeArgString(linkPath), CmdArgs, Inputs);
+ auto LinkCmd = std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF16(),
+ Args.MakeArgString(linkPath), CmdArgs, Inputs, Output);
if (!Environment.empty())
LinkCmd->setEnvironment(Environment);
C.addCommand(std::move(LinkCmd));
@@ -734,9 +749,9 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
CmdArgs.push_back(Fo);
std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe");
- return std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileUTF16(),
- Args.MakeArgString(Exec), CmdArgs, Inputs);
+ return std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF16(), Args.MakeArgString(Exec),
+ CmdArgs, Inputs, Output);
}
MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
@@ -747,11 +762,12 @@ MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
- // Check the environment first, since that's probably the user telling us
- // what they want to use.
- // Failing that, just try to find the newest Visual Studio version we can
- // and use its default VC toolchain.
- findVCToolChainViaEnvironment(VCToolChainPath, VSLayout) ||
+ // Check the command line first, that's the user explicitly telling us what to
+ // use. Check the environment next, in case we're being invoked from a VS
+ // command prompt. Failing that, just try to find the newest Visual Studio
+ // version we can and use its default VC toolchain.
+ findVCToolChainViaCommandLine(Args, VCToolChainPath, VSLayout) ||
+ findVCToolChainViaEnvironment(VCToolChainPath, VSLayout) ||
findVCToolChainViaSetupConfig(VCToolChainPath, VSLayout) ||
findVCToolChainViaRegistry(VCToolChainPath, VSLayout);
}
@@ -1263,15 +1279,18 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
// Honor %INCLUDE%. It should know essential search paths with vcvarsall.bat.
- if (llvm::Optional<std::string> cl_include_dir =
- llvm::sys::Process::GetEnv("INCLUDE")) {
- SmallVector<StringRef, 8> Dirs;
- StringRef(*cl_include_dir)
- .split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
- for (StringRef Dir : Dirs)
- addSystemInclude(DriverArgs, CC1Args, Dir);
- if (!Dirs.empty())
- return;
+ // Skip if the user expressly set a vctoolsdir
+ if (!DriverArgs.getLastArg(options::OPT__SLASH_vctoolsdir)) {
+ if (llvm::Optional<std::string> cl_include_dir =
+ llvm::sys::Process::GetEnv("INCLUDE")) {
+ SmallVector<StringRef, 8> Dirs;
+ StringRef(*cl_include_dir)
+ .split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ for (StringRef Dir : Dirs)
+ addSystemInclude(DriverArgs, CC1Args, Dir);
+ if (!Dirs.empty())
+ return;
+ }
}
// When built with access to the proper Windows APIs, try to actually find
diff --git a/clang/lib/Driver/ToolChains/MinGW.cpp b/clang/lib/Driver/ToolChains/MinGW.cpp
index a1a1b413fb6c..f6cead412236 100644
--- a/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -51,11 +51,11 @@ void tools::MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
if (Args.hasArg(options::OPT_gsplit_dwarf))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Inputs[0], Output));
+ SplitDebugName(JA, Args, Inputs[0], Output));
}
void tools::MinGW::Linker::AddLibGCC(const ArgList &Args,
@@ -164,17 +164,14 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
const char *OutputFile = Output.getFilename();
// GCC implicitly adds an .exe extension if it is given an output file name
- // that lacks an extension. However, GCC only does this when actually
- // running on windows, not when operating as a cross compiler. As some users
- // have come to rely on this behaviour, try to replicate it.
-#ifdef _WIN32
- if (!llvm::sys::path::has_extension(OutputFile))
+ // that lacks an extension.
+ // GCC used to do this only when the compiler itself runs on Windows, but
+ // since GCC 8 it does the same when cross-compiling as well.
+ if (!llvm::sys::path::has_extension(OutputFile)) {
CmdArgs.push_back(Args.MakeArgString(Twine(OutputFile) + ".exe"));
- else
+ OutputFile = CmdArgs.back();
+ } else
CmdArgs.push_back(OutputFile);
-#else
- CmdArgs.push_back(OutputFile);
-#endif
Args.AddAllArgs(CmdArgs, options::OPT_e);
// FIXME: add -N, -n flags
@@ -322,8 +319,9 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
const char *Exec = Args.MakeArgString(TC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
}
// Simplified from Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple.
@@ -493,6 +491,7 @@ SanitizerMask toolchains::MinGW::getSupportedSanitizers() const {
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
+ Res |= SanitizerKind::Vptr;
return Res;
}
diff --git a/clang/lib/Driver/ToolChains/Minix.cpp b/clang/lib/Driver/ToolChains/Minix.cpp
index d0314795620c..44479a24ebe7 100644
--- a/clang/lib/Driver/ToolChains/Minix.cpp
+++ b/clang/lib/Driver/ToolChains/Minix.cpp
@@ -36,8 +36,9 @@ void tools::minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -89,8 +90,9 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/Myriad.cpp b/clang/lib/Driver/ToolChains/Myriad.cpp
index 84fe4748b6fa..ab0df5d8f168 100644
--- a/clang/lib/Driver/ToolChains/Myriad.cpp
+++ b/clang/lib/Driver/ToolChains/Myriad.cpp
@@ -79,7 +79,7 @@ void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetProgramPath("moviCompile"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Args.MakeArgString(Exec), CmdArgs,
- Inputs));
+ Inputs, Output));
}
void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -115,7 +115,7 @@ void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetProgramPath("moviAsm"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Args.MakeArgString(Exec), CmdArgs,
- Inputs));
+ Inputs, Output));
}
void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -200,9 +200,9 @@ void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(TC.GetProgramPath("sparc-myriad-rtems-ld"));
- C.addCommand(
- std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
- Args.MakeArgString(Exec), CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Exec),
+ CmdArgs, Inputs, Output));
}
MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
diff --git a/clang/lib/Driver/ToolChains/NaCl.cpp b/clang/lib/Driver/ToolChains/NaCl.cpp
index 15a773675299..8a150c394753 100644
--- a/clang/lib/Driver/ToolChains/NaCl.cpp
+++ b/clang/lib/Driver/ToolChains/NaCl.cpp
@@ -193,8 +193,9 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
/// NaCl Toolchain
diff --git a/clang/lib/Driver/ToolChains/NetBSD.cpp b/clang/lib/Driver/ToolChains/NetBSD.cpp
index 253ee6ce0f72..48bf061c6650 100644
--- a/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -103,8 +103,9 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as")));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -338,8 +339,9 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/OpenBSD.cpp b/clang/lib/Driver/ToolChains/OpenBSD.cpp
index 9c1a9c5f8228..f155d74632f9 100644
--- a/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -10,10 +10,12 @@
#include "Arch/Mips.h"
#include "Arch/Sparc.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Path.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -41,15 +43,6 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-many");
break;
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel: {
- CmdArgs.push_back("-32");
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
- break;
- }
-
case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
std::string CPU = getCPUName(Args, getToolChain().getTriple());
@@ -89,8 +82,9 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -228,8 +222,9 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
}
SanitizerMask OpenBSD::getSupportedSanitizers() const {
@@ -256,6 +251,45 @@ OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
}
+void OpenBSD::AddClangSystemIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(D.ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/usr/include");
+}
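The resulting include search order on OpenBSD, sketched for a default configuration:

  1. <resource-dir>/include   built-in headers, unless -nobuiltininc
  2. C_INCLUDE_DIRS entries   configure-time dirs, sysroot-prefixed when
                              absolute; if any are set, the search stops here
  3. <sysroot>/usr/include    extern "C" system headers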
+
+void OpenBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
+}
+
void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
bool Profiling = Args.hasArg(options::OPT_pg);
@@ -264,17 +298,18 @@ void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
CmdArgs.push_back(Profiling ? "-lc++abi_p" : "-lc++abi");
}
+std::string OpenBSD::getCompilerRT(const ArgList &Args,
+ StringRef Component,
+ FileType Type) const {
+ SmallString<128> Path(getDriver().SysRoot);
+ llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a");
+ return std::string(Path.str());
+}
+
Tool *OpenBSD::buildAssembler() const {
return new tools::openbsd::Assembler(*this);
}
Tool *OpenBSD::buildLinker() const { return new tools::openbsd::Linker(*this); }
-void OpenBSD::addClangTargetOptions(const ArgList &DriverArgs,
- ArgStringList &CC1Args,
- Action::OffloadKind) const {
- // Support for .init_array is still new (Aug 2016).
- if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
- options::OPT_fno_use_init_array, false))
- CC1Args.push_back("-fno-use-init-array");
-}
+bool OpenBSD::HasNativeLLVMSupport() const { return true; }
diff --git a/clang/lib/Driver/ToolChains/OpenBSD.h b/clang/lib/Driver/ToolChains/OpenBSD.h
index 897eee57ab68..4932ed5c609c 100644
--- a/clang/lib/Driver/ToolChains/OpenBSD.h
+++ b/clang/lib/Driver/ToolChains/OpenBSD.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_OPENBSD_H
#include "Gnu.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -54,6 +55,8 @@ public:
OpenBSD(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ bool HasNativeLLVMSupport() const override;
+
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
bool isPIEDefault() const override { return true; }
@@ -65,21 +68,26 @@ public:
return ToolChain::CST_Libcxx;
}
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
- return 2;
+ std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type = ToolChain::FT_Static) const override;
+
+ LangOptions::StackProtectorMode
+ GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return LangOptions::SSPStrong;
}
unsigned GetDefaultDwarfVersion() const override { return 2; }
SanitizerMask getSupportedSanitizers() const override;
- void
- addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
-
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/clang/lib/Driver/ToolChains/PS4CPU.cpp b/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 6dc81899cbaa..383b0c50d410 100644
--- a/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -66,8 +66,9 @@ void tools::PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("orbis-as"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
}
static void AddPS4SanitizerArgs(const ToolChain &TC, ArgStringList &CmdArgs) {
@@ -152,8 +153,9 @@ void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
}
toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
@@ -237,9 +239,8 @@ SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
}
void toolchains::PS4CPU::addClangTargetOptions(
- const ArgList &DriverArgs,
- ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadingKind) const {
+ const ArgList &DriverArgs, ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
// PS4 does not use init arrays.
if (DriverArgs.hasArg(options::OPT_fuse_init_array)) {
Arg *A = DriverArgs.getLastArg(options::OPT_fuse_init_array);
@@ -248,4 +249,36 @@ void toolchains::PS4CPU::addClangTargetOptions(
}
CC1Args.push_back("-fno-use-init-array");
+
+ const Arg *A =
+ DriverArgs.getLastArg(options::OPT_fvisibility_from_dllstorageclass,
+ options::OPT_fno_visibility_from_dllstorageclass);
+ if (!A ||
+ A->getOption().matches(options::OPT_fvisibility_from_dllstorageclass)) {
+ CC1Args.push_back("-fvisibility-from-dllstorageclass");
+
+ if (DriverArgs.hasArg(options::OPT_fvisibility_dllexport_EQ))
+ DriverArgs.AddLastArg(CC1Args, options::OPT_fvisibility_dllexport_EQ);
+ else
+ CC1Args.push_back("-fvisibility-dllexport=protected");
+
+ if (DriverArgs.hasArg(options::OPT_fvisibility_nodllstorageclass_EQ))
+ DriverArgs.AddLastArg(CC1Args,
+ options::OPT_fvisibility_nodllstorageclass_EQ);
+ else
+ CC1Args.push_back("-fvisibility-nodllstorageclass=hidden");
+
+ if (DriverArgs.hasArg(options::OPT_fvisibility_externs_dllimport_EQ))
+ DriverArgs.AddLastArg(CC1Args,
+ options::OPT_fvisibility_externs_dllimport_EQ);
+ else
+ CC1Args.push_back("-fvisibility-externs-dllimport=default");
+
+ if (DriverArgs.hasArg(
+ options::OPT_fvisibility_externs_nodllstorageclass_EQ))
+ DriverArgs.AddLastArg(
+ CC1Args, options::OPT_fvisibility_externs_nodllstorageclass_EQ);
+ else
+ CC1Args.push_back("-fvisibility-externs-nodllstorageclass=default");
+ }
}
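Net effect, sketched: unless -fno-visibility-from-dllstorageclass is given, the PS4 driver now passes to cc1

  -fvisibility-from-dllstorageclass
  -fvisibility-dllexport=protected
  -fvisibility-nodllstorageclass=hidden
  -fvisibility-externs-dllimport=default
  -fvisibility-externs-nodllstorageclass=default

with each individual default yielding to its corresponding explicit option when one is present.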
diff --git a/clang/lib/Driver/ToolChains/PS4CPU.h b/clang/lib/Driver/ToolChains/PS4CPU.h
index 968be015d411..5f5d0e57d4ea 100644
--- a/clang/lib/Driver/ToolChains/PS4CPU.h
+++ b/clang/lib/Driver/ToolChains/PS4CPU.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PS4CPU_H
#include "Gnu.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -73,8 +74,9 @@ public:
bool HasNativeLLVMSupport() const override;
bool isPICDefault() const override;
- unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
- return 2; // SSPStrong
+ LangOptions::StackProtectorMode
+ GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return LangOptions::SSPStrong;
}
llvm::DebuggerKind getDefaultDebuggerTuning() const override {
diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index cc912d94cb92..0dc12c7a84b5 100644
--- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -31,6 +31,21 @@ static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
addPathIfExists(D, InstallPath + Path, Paths);
}
+// This function tests whether a GCC installation is present, either via
+// the --gcc-toolchain argument or in the same prefix where clang is
+// installed. The result decides whether to instantiate this toolchain or
+// the bare-metal toolchain.
+bool RISCVToolChain::hasGCCToolchain(const Driver &D,
+ const llvm::opt::ArgList &Args) {
+ if (Args.getLastArg(options::OPT_gcc_toolchain))
+ return true;
+
+ SmallString<128> GCCDir;
+ llvm::sys::path::append(GCCDir, D.Dir, "..", D.getTargetTriple(),
+ "lib/crt0.o");
+ return llvm::sys::fs::exists(GCCDir);
+}
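A probe sketch with a hypothetical install prefix: for clang at /opt/riscv/bin/clang targeting riscv64-unknown-elf, with no --gcc-toolchain given, the check looks for

  /opt/riscv/bin/../riscv64-unknown-elf/lib/crt0.o

and a hit selects RISCVToolChain, while a miss falls through to the bare-metal toolchain.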
+
/// RISCV Toolchain
RISCVToolChain::RISCVToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
@@ -191,8 +206,8 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(
- std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
- Args.MakeArgString(Linker), CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker),
+ CmdArgs, Inputs, Output));
}
// RISCV tools end.
diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.h b/clang/lib/Driver/ToolChains/RISCVToolchain.h
index 4734aee5f1ab..62099bee0404 100644
--- a/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -21,6 +21,7 @@ public:
RISCVToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ static bool hasGCCToolchain(const Driver &D, const llvm::opt::ArgList &Args);
bool IsIntegratedAssemblerDefault() const override { return true; }
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
diff --git a/clang/lib/Driver/ToolChains/ROCm.h b/clang/lib/Driver/ToolChains/ROCm.h
index 962c72fedfe0..21e62a465d7b 100644
--- a/clang/lib/Driver/ToolChains/ROCm.h
+++ b/clang/lib/Driver/ToolChains/ROCm.h
@@ -13,7 +13,6 @@
#include "clang/Basic/LLVM.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Triple.h"
@@ -103,12 +102,8 @@ private:
CorrectlyRoundedSqrt.isValid();
}
- // GPU architectures for which we have raised an error in
- // CheckRocmVersionSupportsArch.
- mutable llvm::SmallSet<CudaArch, 4> ArchsWithBadVersion;
-
void scanLibDevicePath(llvm::StringRef Path);
- void ParseHIPVersionFile(llvm::StringRef V);
+ bool parseHIPVersionFile(llvm::StringRef V);
SmallVector<Candidate, 4> getInstallationPathCandidates();
public:
@@ -124,12 +119,6 @@ public:
bool DAZ, bool FiniteOnly, bool UnsafeMathOpt,
bool FastRelaxedMath, bool CorrectSqrt) const;
- /// Emit an error if Version does not support the given Arch.
- ///
- /// If either Version or Arch is unknown, does not emit an error. Emits at
- /// most one error per Arch.
- void CheckRocmVersionSupportsArch(CudaArch Arch) const;
-
/// Check whether we detected a valid HIP runtime.
bool hasHIPRuntime() const { return HasHIPRuntime; }
diff --git a/clang/lib/Driver/ToolChains/Solaris.cpp b/clang/lib/Driver/ToolChains/Solaris.cpp
index b8fdc87478bc..4ed4d839ad10 100644
--- a/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -42,7 +42,7 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -152,7 +152,7 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
static StringRef getSolarisLibSuffix(const llvm::Triple &Triple) {
diff --git a/clang/lib/Driver/ToolChains/Solaris.h b/clang/lib/Driver/ToolChains/Solaris.h
index b79e626ef38d..fbac92c2c0f3 100644
--- a/clang/lib/Driver/ToolChains/Solaris.h
+++ b/clang/lib/Driver/ToolChains/Solaris.h
@@ -65,6 +65,11 @@ public:
SanitizerMask getSupportedSanitizers() const override;
unsigned GetDefaultDwarfVersion() const override { return 2; }
+ const char *getDefaultLinker() const override {
+ // clang currently uses Solaris ld-only options.
+ return "/usr/bin/ld";
+ }
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/clang/lib/Driver/ToolChains/VEToolchain.cpp b/clang/lib/Driver/ToolChains/VEToolchain.cpp
index 6ea405c0269c..e28f340f9aad 100644
--- a/clang/lib/Driver/ToolChains/VEToolchain.cpp
+++ b/clang/lib/Driver/ToolChains/VEToolchain.cpp
@@ -102,14 +102,37 @@ void VEToolChain::addClangTargetOptions(const ArgList &DriverArgs,
void VEToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- // TODO upstream VE libc++ patches
- llvm_unreachable("The VE target has no C++ stdlib for Clang yet");
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+ if (const char *cl_include_dir = getenv("NCC_CPLUS_INCLUDE_PATH")) {
+ SmallVector<StringRef, 4> Dirs;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ StringRef(cl_include_dir).split(Dirs, StringRef(EnvPathSeparatorStr));
+ ArrayRef<StringRef> DirVec(Dirs);
+ addSystemIncludes(DriverArgs, CC1Args, DirVec);
+ } else {
+ SmallString<128> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "include/c++/v1");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
}
void VEToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- // TODO upstream VE libc++ patches
- llvm_unreachable("The VE target has no C++ stdlib for Clang yet");
+ assert((GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) &&
+ "Only -lc++ (aka libxx) is supported in this toolchain.");
+
+ tools::addArchSpecificRPath(*this, Args, CmdArgs);
+
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+ // libc++ requires -lpthread under a glibc environment
+ CmdArgs.push_back("-lpthread");
+ // libunwind requires -ldl under a glibc environment
+ CmdArgs.push_back("-ldl");
}
llvm::ExceptionHandling
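The NCC_CPLUS_INCLUDE_PATH handling above splits a platform path list on llvm::sys::EnvPathSeparator (':' on POSIX, ';' on Windows). A standalone sketch of that idiom, assuming EnvPathSeparator is declared in llvm/Support/Program.h:

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Program.h" // llvm::sys::EnvPathSeparator (assumed header)

static void splitEnvPathList(llvm::StringRef PathList,
                             llvm::SmallVectorImpl<llvm::StringRef> &Dirs) {
  // Build a one-character separator string and split on it, as above.
  const char Sep[] = {llvm::sys::EnvPathSeparator, '\0'};
  PathList.split(Dirs, llvm::StringRef(Sep));
}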
diff --git a/clang/lib/Driver/ToolChains/VEToolchain.h b/clang/lib/Driver/ToolChains/VEToolchain.h
index 59069c0a7595..b330331ca84e 100644
--- a/clang/lib/Driver/ToolChains/VEToolchain.h
+++ b/clang/lib/Driver/ToolChains/VEToolchain.h
@@ -26,6 +26,7 @@ protected:
Tool *buildLinker() const override;
public:
+ bool IsIntegratedAssemblerDefault() const override { return true; }
bool isPICDefault() const override;
bool isPIEDefault() const override;
bool isPICDefaultForced() const override;
diff --git a/clang/lib/Driver/ToolChains/WebAssembly.cpp b/clang/lib/Driver/ToolChains/WebAssembly.cpp
index 10168736400f..6b654886e774 100644
--- a/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -114,8 +114,9 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Linker, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Linker, CmdArgs, Inputs, Output));
// When optimizing, if wasm-opt is available, run it.
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
@@ -139,7 +140,7 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Output.getFilename());
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::AtFileCurCP(), WasmOpt, CmdArgs,
- Inputs));
+ Inputs, Output));
}
}
}
@@ -243,6 +244,27 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
CC1Args.push_back("+sign-ext");
}
+ if (!DriverArgs.hasFlag(options::OPT_mmutable_globals,
+ options::OPT_mno_mutable_globals, false)) {
+ // -fPIC implies +mutable-globals because the PIC ABI used by the linker
+ // depends on importing and exporting mutable globals.
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(*this, DriverArgs);
+ if (RelocationModel == llvm::Reloc::PIC_) {
+ if (DriverArgs.hasFlag(options::OPT_mno_mutable_globals,
+ options::OPT_mmutable_globals, false)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fPIC"
+ << "-mno-mutable-globals";
+ }
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+mutable-globals");
+ }
+ }
+
if (DriverArgs.getLastArg(options::OPT_fwasm_exceptions)) {
// '-fwasm-exceptions' is not compatible with '-mno-exception-handling'
if (DriverArgs.hasFlag(options::OPT_mno_exception_handing,
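For readers tracing the mutable-globals logic above: llvm::opt::ArgList::hasFlag(Pos, Neg, Default) yields Default unless the last of the two options on the command line decides otherwise, so the outer guard means "not explicitly enabled". A reduced sketch of that behavior (option names from the patch):

// Sketch: true when the user passed neither flag and the default (false)
// applies; under -fPIC the code above then either injects
// "+mutable-globals" or, on an explicit -mno-mutable-globals, emits
// err_drv_argument_not_allowed_with.
static bool mutableGlobalsLeftToDefault(const llvm::opt::ArgList &Args) {
  return !Args.hasFlag(options::OPT_mmutable_globals,
                       options::OPT_mno_mutable_globals, /*Default=*/false);
}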
diff --git a/clang/lib/Driver/ToolChains/XCore.cpp b/clang/lib/Driver/ToolChains/XCore.cpp
index 5030c73c7d82..5f94f83d3691 100644
--- a/clang/lib/Driver/ToolChains/XCore.cpp
+++ b/clang/lib/Driver/ToolChains/XCore.cpp
@@ -53,7 +53,7 @@ void tools::XCore::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
void tools::XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -82,7 +82,7 @@ void tools::XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
/// XCore tool chain
diff --git a/clang/lib/Driver/ToolChains/ZOS.cpp b/clang/lib/Driver/ToolChains/ZOS.cpp
new file mode 100644
index 000000000000..f921227076a5
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/ZOS.cpp
@@ -0,0 +1,33 @@
+//===--- ZOS.cpp - z/OS ToolChain Implementations ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ZOS.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace llvm::opt;
+using namespace clang;
+
+ZOS::ZOS(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : ToolChain(D, Triple, Args) {}
+
+ZOS::~ZOS() {}
+
+void ZOS::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
+ // Pass "-faligned-alloc-unavailable" only when the user hasn't manually
+ // enabled or disabled aligned allocations.
+ if (!DriverArgs.hasArgNoClaim(options::OPT_faligned_allocation,
+ options::OPT_fno_aligned_allocation))
+ CC1Args.push_back("-faligned-alloc-unavailable");
+}
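The hasArgNoClaim call above is deliberate: it checks for the options without claiming them, so clang's unused-argument machinery still sees them. A small sketch of the contract as understood here (semantics hedged, not quoted from the ArgList docs):

static void sketchClaimSemantics(const llvm::opt::ArgList &Args) {
  // Presence test only; the arguments stay unclaimed and can still trigger
  // -Wunused-command-line-argument later.
  bool Present = Args.hasArgNoClaim(options::OPT_faligned_allocation,
                                    options::OPT_fno_aligned_allocation);
  // Same test, but matching arguments are marked claimed (consumed).
  bool Claimed = Args.hasArg(options::OPT_faligned_allocation,
                             options::OPT_fno_aligned_allocation);
  (void)Present;
  (void)Claimed;
}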
diff --git a/clang/lib/Driver/ToolChains/ZOS.h b/clang/lib/Driver/ToolChains/ZOS.h
new file mode 100644
index 000000000000..cace85d6da77
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/ZOS.h
@@ -0,0 +1,40 @@
+//===--- ZOS.h - z/OS ToolChain Implementations -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ZOS_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ZOS_H
+
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY ZOS : public ToolChain {
+public:
+ ZOS(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~ZOS() override;
+
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault() const override { return false; }
+ bool isPICDefaultForced() const override { return false; }
+
+ bool IsIntegratedAssemblerDefault() const override { return true; }
+
+ void addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ZOS_H
diff --git a/clang/lib/Driver/Types.cpp b/clang/lib/Driver/Types.cpp
index 399e26d8d64a..e898334c3227 100644
--- a/clang/lib/Driver/Types.cpp
+++ b/clang/lib/Driver/Types.cpp
@@ -141,7 +141,7 @@ bool types::isAcceptedByClang(ID Id) {
case TY_CXXHeader: case TY_PP_CXXHeader:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
case TY_CXXModule: case TY_PP_CXXModule:
- case TY_AST: case TY_ModuleFile:
+ case TY_AST: case TY_ModuleFile: case TY_PCH:
case TY_LLVM_IR: case TY_LLVM_BC:
return true;
}
@@ -325,10 +325,12 @@ types::getCompilationPhases(const clang::driver::Driver &Driver,
// Filter to compiler mode. When the compiler is run as a preprocessor then
// compilation is not an option.
// -S runs the compiler in Assembly listing mode.
+ // -test-io is used by Flang to run the InputOutputTest action
if (Driver.CCCIsCPP() || DAL.getLastArg(options::OPT_E) ||
DAL.getLastArg(options::OPT__SLASH_EP) ||
DAL.getLastArg(options::OPT_M, options::OPT_MM) ||
- DAL.getLastArg(options::OPT__SLASH_P))
+ DAL.getLastArg(options::OPT__SLASH_P) ||
+ DAL.getLastArg(options::OPT_test_io))
LastPhase = phases::Preprocess;
// --precompile only runs up to precompilation.
diff --git a/clang/lib/Driver/XRayArgs.cpp b/clang/lib/Driver/XRayArgs.cpp
index f00c3906df97..b44509ad3b88 100644
--- a/clang/lib/Driver/XRayArgs.cpp
+++ b/clang/lib/Driver/XRayArgs.cpp
@@ -186,6 +186,21 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
Modes.push_back(std::string(M));
}
+ if (const Arg *A = Args.getLastArg(options::OPT_fxray_function_groups)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, XRayFunctionGroups) || XRayFunctionGroups < 1)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fxray_selected_function_group)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, XRaySelectedFunctionGroup) ||
+ XRaySelectedFunctionGroup < 0 ||
+ XRaySelectedFunctionGroup >= XRayFunctionGroups)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+
// Then we want to sort and unique the modes we've collected.
llvm::sort(Modes);
Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
@@ -210,6 +225,17 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
if (!XRayFunctionIndex)
CmdArgs.push_back("-fno-xray-function-index");
+ if (XRayFunctionGroups > 1) {
+ CmdArgs.push_back(Args.MakeArgString(Twine("-fxray-function-groups=") +
+ Twine(XRayFunctionGroups)));
+ }
+
+ if (XRaySelectedFunctionGroup != 0) {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fxray-selected-function-group=") +
+ Twine(XRaySelectedFunctionGroup)));
+ }
+
CmdArgs.push_back(Args.MakeArgString(Twine(XRayInstructionThresholdOption) +
Twine(InstructionThreshold)));
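The two validations above rely on StringRef::getAsInteger returning true on *failure* (radix 0 auto-detects a 0x/0 prefix), so each condition reads "unparseable, or parsed but out of range". A standalone sketch; the caller's error handling stands in for D.Diag(err_drv_invalid_value):

#include "llvm/ADT/StringRef.h"

static bool parseFunctionGroups(llvm::StringRef S, int &Groups) {
  // getAsInteger returns true on failure; Groups must be at least 1.
  if (S.getAsInteger(/*Radix=*/0, Groups) || Groups < 1)
    return false; // caller would emit err_drv_invalid_value
  return true;
}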
diff --git a/clang/lib/Edit/EditedSource.cpp b/clang/lib/Edit/EditedSource.cpp
index b3bc63a903a7..74e6005faeb0 100644
--- a/clang/lib/Edit/EditedSource.cpp
+++ b/clang/lib/Edit/EditedSource.cpp
@@ -59,7 +59,7 @@ void EditedSource::finishedCommit() {
SourceLocation ExpLoc;
MacroArgUse ArgUse;
std::tie(ExpLoc, ArgUse) = ExpArg;
- auto &ArgUses = ExpansionToArgMap[ExpLoc.getRawEncoding()];
+ auto &ArgUses = ExpansionToArgMap[ExpLoc];
if (llvm::find(ArgUses, ArgUse) == ArgUses.end())
ArgUses.push_back(ArgUse);
}
@@ -82,7 +82,7 @@ bool EditedSource::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
SourceLocation ExpLoc;
MacroArgUse ArgUse;
deconstructMacroArgLoc(OrigLoc, ExpLoc, ArgUse);
- auto I = ExpansionToArgMap.find(ExpLoc.getRawEncoding());
+ auto I = ExpansionToArgMap.find(ExpLoc);
if (I != ExpansionToArgMap.end() &&
find_if(I->second, [&](const MacroArgUse &U) {
return ArgUse.Identifier == U.Identifier &&
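Dropping getRawEncoding() here works because SourceLocation can now key a DenseMap directly; this import presumably carries the matching DenseMapInfo<clang::SourceLocation> specialization, with ExpansionToArgMap's key type updated to match. A minimal sketch under that assumption:

#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"

static llvm::DenseMap<clang::SourceLocation, unsigned> UsesPerExpansion;

static void noteUse(clang::SourceLocation ExpLoc) {
  ++UsesPerExpansion[ExpLoc]; // no getRawEncoding() round-trip needed
}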
diff --git a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index 6f4a880b649a..7565626cba99 100644
--- a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1085,6 +1085,8 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_BooleanToSignedIntegral:
llvm_unreachable("OpenCL-specific cast in Objective-C?");
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
diff --git a/clang/lib/Format/BreakableToken.cpp b/clang/lib/Format/BreakableToken.cpp
index 15fbe3b6515d..f179ac64de17 100644
--- a/clang/lib/Format/BreakableToken.cpp
+++ b/clang/lib/Format/BreakableToken.cpp
@@ -25,7 +25,7 @@
namespace clang {
namespace format {
-static const char *const Blanks = " \t\v\f\r";
+static constexpr StringRef Blanks = " \t\v\f\r";
static bool IsBlank(char C) {
switch (C) {
case ' ':
@@ -41,25 +41,27 @@ static bool IsBlank(char C) {
static StringRef getLineCommentIndentPrefix(StringRef Comment,
const FormatStyle &Style) {
- static const char *const KnownCStylePrefixes[] = {"///<", "//!<", "///", "//",
- "//!"};
- static const char *const KnownTextProtoPrefixes[] = {"//", "#", "##", "###",
- "####"};
- ArrayRef<const char *> KnownPrefixes(KnownCStylePrefixes);
+ static constexpr StringRef KnownCStylePrefixes[] = {"///<", "//!<", "///",
+ "//!", "//:", "//"};
+ static constexpr StringRef KnownTextProtoPrefixes[] = {"####", "###", "##",
+ "//", "#"};
+ ArrayRef<StringRef> KnownPrefixes(KnownCStylePrefixes);
if (Style.Language == FormatStyle::LK_TextProto)
KnownPrefixes = KnownTextProtoPrefixes;
- StringRef LongestPrefix;
+ assert(std::is_sorted(KnownPrefixes.begin(), KnownPrefixes.end(),
+ [](StringRef Lhs, StringRef Rhs) noexcept {
+ return Lhs.size() > Rhs.size();
+ }));
+
for (StringRef KnownPrefix : KnownPrefixes) {
if (Comment.startswith(KnownPrefix)) {
- size_t PrefixLength = KnownPrefix.size();
- while (PrefixLength < Comment.size() && Comment[PrefixLength] == ' ')
- ++PrefixLength;
- if (PrefixLength > LongestPrefix.size())
- LongestPrefix = Comment.substr(0, PrefixLength);
+ const auto PrefixLength =
+ Comment.find_first_not_of(' ', KnownPrefix.size());
+ return Comment.substr(0, PrefixLength);
}
}
- return LongestPrefix;
+ return {};
}
static BreakableToken::Split
@@ -86,22 +88,53 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
MaxSplitBytes += BytesInChar;
}
+ // In JavaScript, some @tags can be followed by {, and machinery that parses
+ // these comments will fail to understand the comment if followed by a line
+ // break. So avoid ever breaking before a {.
+ if (Style.Language == FormatStyle::LK_JavaScript) {
+ StringRef::size_type SpaceOffset =
+ Text.find_first_of(Blanks, MaxSplitBytes);
+ if (SpaceOffset != StringRef::npos && SpaceOffset + 1 < Text.size() &&
+ Text[SpaceOffset + 1] == '{') {
+ MaxSplitBytes = SpaceOffset + 1;
+ }
+ }
+
StringRef::size_type SpaceOffset = Text.find_last_of(Blanks, MaxSplitBytes);
static const auto kNumberedListRegexp = llvm::Regex("^[1-9][0-9]?\\.");
+ // Some spaces are unacceptable to break on; rewind past them.
while (SpaceOffset != StringRef::npos) {
+ // If a line-comment ends with `\`, the next line continues the comment,
+ // whether or not it starts with `//`. This is confusing and triggers
+ // -Wcomment.
+ // Avoid introducing multiline comments by not allowing a break right
+ // after '\'.
+ if (Style.isCpp()) {
+ StringRef::size_type LastNonBlank =
+ Text.find_last_not_of(Blanks, SpaceOffset);
+ if (LastNonBlank != StringRef::npos && Text[LastNonBlank] == '\\') {
+ SpaceOffset = Text.find_last_of(Blanks, LastNonBlank);
+ continue;
+ }
+ }
+
// Do not split before a number followed by a dot: this would be interpreted
// as a numbered list, which would prevent re-flowing in subsequent passes.
- if (kNumberedListRegexp.match(Text.substr(SpaceOffset).ltrim(Blanks)))
+ if (kNumberedListRegexp.match(Text.substr(SpaceOffset).ltrim(Blanks))) {
SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
- // In JavaScript, some @tags can be followed by {, and machinery that parses
- // these comments will fail to understand the comment if followed by a line
- // break. So avoid ever breaking before a {.
- else if (Style.Language == FormatStyle::LK_JavaScript &&
- SpaceOffset + 1 < Text.size() && Text[SpaceOffset + 1] == '{')
+ continue;
+ }
+
+ // Avoid ever breaking before a @tag or a { in JavaScript.
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ SpaceOffset + 1 < Text.size() &&
+ (Text[SpaceOffset + 1] == '{' || Text[SpaceOffset + 1] == '@')) {
SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
- else
- break;
+ continue;
+ }
+
+ break;
}
if (SpaceOffset == StringRef::npos ||
@@ -718,8 +751,7 @@ bool BreakableBlockComment::mayReflow(
}
BreakableLineCommentSection::BreakableLineCommentSection(
- const FormatToken &Token, unsigned StartColumn,
- unsigned OriginalStartColumn, bool FirstInLine, bool InPPDirective,
+ const FormatToken &Token, unsigned StartColumn, bool InPPDirective,
encoding::Encoding Encoding, const FormatStyle &Style)
: BreakableComment(Token, StartColumn, InPPDirective, Encoding, Style) {
assert(Tok.is(TT_LineComment) &&
@@ -742,10 +774,7 @@ BreakableLineCommentSection::BreakableLineCommentSection(
OriginalPrefix.resize(Lines.size());
for (size_t i = FirstLineIndex, e = Lines.size(); i < e; ++i) {
Lines[i] = Lines[i].ltrim(Blanks);
- // We need to trim the blanks in case this is not the first line in a
- // multiline comment. Then the indent is included in Lines[i].
- StringRef IndentPrefix =
- getLineCommentIndentPrefix(Lines[i].ltrim(Blanks), Style);
+ StringRef IndentPrefix = getLineCommentIndentPrefix(Lines[i], Style);
assert((TokenText.startswith("//") || TokenText.startswith("#")) &&
"unsupported line comment prefix, '//' and '#' are supported");
OriginalPrefix[i] = Prefix[i] = IndentPrefix;
@@ -761,9 +790,14 @@ BreakableLineCommentSection::BreakableLineCommentSection(
Prefix[i] = "///< ";
else if (Prefix[i] == "//!<")
Prefix[i] = "//!< ";
- else if (Prefix[i] == "#" &&
- Style.Language == FormatStyle::LK_TextProto)
+ else if (Prefix[i] == "#")
Prefix[i] = "# ";
+ else if (Prefix[i] == "##")
+ Prefix[i] = "## ";
+ else if (Prefix[i] == "###")
+ Prefix[i] = "### ";
+ else if (Prefix[i] == "####")
+ Prefix[i] = "#### ";
}
Tokens[i] = LineTok;
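The rewritten getLineCommentIndentPrefix returns on the first match, which is why the prefix tables above are now ordered longest-first and guarded by an is_sorted assert: "///<" must be tried before "///", and "///" before "//". A reduced sketch of that lookup contract:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"

static llvm::StringRef findKnownPrefix(llvm::StringRef Comment,
                                       llvm::ArrayRef<llvm::StringRef> Known) {
  // Precondition: Known is sorted by size, descending; first match wins.
  for (llvm::StringRef P : Known)
    if (Comment.startswith(P))
      return P;
  return {};
}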
diff --git a/clang/lib/Format/BreakableToken.h b/clang/lib/Format/BreakableToken.h
index a6691300de3b..41b19f82e9df 100644
--- a/clang/lib/Format/BreakableToken.h
+++ b/clang/lib/Format/BreakableToken.h
@@ -436,7 +436,6 @@ private:
class BreakableLineCommentSection : public BreakableComment {
public:
BreakableLineCommentSection(const FormatToken &Token, unsigned StartColumn,
- unsigned OriginalStartColumn, bool FirstInLine,
bool InPPDirective, encoding::Encoding Encoding,
const FormatStyle &Style);
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index b1497651a8fe..7198671901f3 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -284,7 +284,7 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// The opening "{" of a braced list has to be on the same line as the first
// element if it is nested in another braced init list or function call.
if (!Current.MustBreakBefore && Previous.is(tok::l_brace) &&
- Previous.isNot(TT_DictLiteral) && Previous.BlockKind == BK_BracedInit &&
+ Previous.isNot(TT_DictLiteral) && Previous.is(BK_BracedInit) &&
Previous.Previous &&
Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma))
return false;
@@ -400,7 +400,9 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
return true;
if (Current.is(TT_SelectorName) && !Previous.is(tok::at) &&
State.Stack.back().ObjCSelectorNameFound &&
- State.Stack.back().BreakBeforeParameter)
+ State.Stack.back().BreakBeforeParameter &&
+ (Style.ObjCBreakBeforeNestedBlockParam ||
+ !Current.startsSequence(TT_SelectorName, tok::colon, tok::caret)))
return true;
unsigned NewLineColumn = getNewLineColumn(State);
@@ -501,7 +503,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// The following could be precomputed as they do not depend on the state.
// However, as they should take effect only if the UnwrappedLine does not fit
// into the ColumnLimit, they are checked here in the ContinuationIndenter.
- if (Style.ColumnLimit != 0 && Previous.BlockKind == BK_Block &&
+ if (Style.ColumnLimit != 0 && Previous.is(BK_Block) &&
Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment))
return true;
@@ -627,7 +629,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// opening parenthesis. Don't break if it doesn't conserve columns.
if (Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak &&
(Previous.isOneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) ||
- (Previous.is(tok::l_brace) && Previous.BlockKind != BK_Block &&
+ (Previous.is(tok::l_brace) && Previous.isNot(BK_Block) &&
Style.Cpp11BracedListStyle)) &&
State.Column > getNewLineColumn(State) &&
(!Previous.Previous || !Previous.Previous->isOneOf(
@@ -648,7 +650,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
!State.Stack.back().IsCSharpGenericTypeConstraint &&
Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
- (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit)) {
+ (Current.isNot(TT_LineComment) || Previous.is(BK_BracedInit))) {
State.Stack.back().Indent = State.Column + Spaces;
State.Stack.back().IsAligned = true;
}
@@ -783,6 +785,22 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
State.Column = getNewLineColumn(State);
+ // Add a penalty proportional to the amount of whitespace away from
+ // FirstColumn. This tends to penalize several far-right-indented lines
+ // and prefers a line break prior to such a block, e.g.:
+ //
+ // Constructor() :
+ // member(value), looooooooooooooooong_member(
+ // looooooooooong_call(param_1, param_2, param_3))
+ // would then become
+ // Constructor() :
+ // member(value),
+ // looooooooooooooooong_member(
+ // looooooooooong_call(param_1, param_2, param_3))
+ if (State.Column > State.FirstIndent)
+ Penalty +=
+ Style.PenaltyIndentedWhitespace * (State.Column - State.FirstIndent);
+
// Indent nested blocks relative to this column, unless in a very specific
// JavaScript special case where:
//
@@ -972,7 +990,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return (Style.IndentWidth * State.Line->First->IndentLevel) +
Style.IndentWidth;
- if (NextNonComment->is(tok::l_brace) && NextNonComment->BlockKind == BK_Block)
+ if (NextNonComment->is(tok::l_brace) && NextNonComment->is(BK_Block))
return Current.NestingLevel == 0 ? State.FirstIndent
: State.Stack.back().Indent;
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
@@ -982,8 +1000,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
State.Stack.size() > 1) {
if (Current.closesBlockOrBlockTypeList(Style))
return State.Stack[State.Stack.size() - 2].NestedBlockIndent;
- if (Current.MatchingParen &&
- Current.MatchingParen->BlockKind == BK_BracedInit)
+ if (Current.MatchingParen && Current.MatchingParen->is(BK_BracedInit))
return State.Stack[State.Stack.size() - 2].LastSpace;
return State.FirstIndent;
}
@@ -1349,16 +1366,20 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
State.Stack.back().LastSpace);
}
- // If BreakBeforeBinaryOperators is set, un-indent a bit to account for
- // the operator and keep the operands aligned
- if (Style.AlignOperands == FormatStyle::OAS_AlignAfterOperator &&
- Previous &&
+ if (Previous &&
(Previous->getPrecedence() == prec::Assignment ||
Previous->is(tok::kw_return) ||
(*I == prec::Conditional && Previous->is(tok::question) &&
Previous->is(TT_ConditionalExpr))) &&
- !Newline)
- NewParenState.UnindentOperator = true;
+ !Newline) {
+ // If BreakBeforeBinaryOperators is set, un-indent a bit to account for
+ // the operator and keep the operands aligned
+ if (Style.AlignOperands == FormatStyle::OAS_AlignAfterOperator)
+ NewParenState.UnindentOperator = true;
+ // Mark indentation as alignment if the expression is aligned.
+ if (Style.AlignOperands != FormatStyle::OAS_DontAlign)
+ NewParenState.IsAligned = true;
+ }
// Do not indent relative to the fake parentheses inserted for "." or "->".
// This is a special case to make the following two statements consistent:
@@ -1417,7 +1438,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
State.Stack.back().IsCSharpGenericTypeConstraint)
return;
- if (Current.MatchingParen && Current.BlockKind == BK_Block) {
+ if (Current.MatchingParen && Current.is(BK_Block)) {
moveStateToNewBlock(State);
return;
}
@@ -1486,9 +1507,8 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(State.Line->MustBeDeclaration && !BinPackDeclaration) ||
(!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
(Style.ExperimentalAutoDetectBinPacking &&
- (Current.PackingKind == PPK_OnePerLine ||
- (!BinPackInconclusiveFunctions &&
- Current.PackingKind == PPK_Inconclusive)));
+ (Current.is(PPK_OnePerLine) ||
+ (!BinPackInconclusiveFunctions && Current.is(PPK_Inconclusive))));
if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen &&
Style.ObjCBreakBeforeNestedBlockParam) {
@@ -1954,8 +1974,7 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
switchesFormatting(Current))
return nullptr;
return std::make_unique<BreakableLineCommentSection>(
- Current, StartColumn, Current.OriginalColumn, !Current.Previous,
- /*InPPDirective=*/false, Encoding, Style);
+ Current, StartColumn, /*InPPDirective=*/false, Encoding, Style);
}
return nullptr;
}
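The mechanical Previous.BlockKind == BK_Block to Previous.is(BK_Block) rewrites throughout this file presuppose small is() overloads on FormatToken for the newly bitfield-backed kinds. A sketch of their assumed shape (the real declarations belong in FormatToken.h, whose diff follows below):

// Assumed convenience predicates; BraceBlockKind and ParameterPackingKind
// are the existing clang::format enums.
struct FormatTokenSketch {
  BraceBlockKind getBlockKind() const;
  ParameterPackingKind getPackingKind() const;
  bool is(BraceBlockKind BBK) const { return getBlockKind() == BBK; }
  bool is(ParameterPackingKind PPK) const { return getPackingKind() == PPK; }
};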
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 0d277a6464af..5f5bb8585ac1 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -128,6 +128,21 @@ template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::AlignConsecutiveStyle> {
+ static void enumeration(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::ACS_None);
+ IO.enumCase(Value, "Consecutive", FormatStyle::ACS_Consecutive);
+ IO.enumCase(Value, "AcrossEmptyLines", FormatStyle::ACS_AcrossEmptyLines);
+ IO.enumCase(Value, "AcrossComments", FormatStyle::ACS_AcrossComments);
+ IO.enumCase(Value, "AcrossEmptyLinesAndComments",
+ FormatStyle::ACS_AcrossEmptyLinesAndComments);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::ACS_Consecutive);
+ IO.enumCase(Value, "false", FormatStyle::ACS_None);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::ShortIfStyle> {
static void enumeration(IO &IO, FormatStyle::ShortIfStyle &Value) {
IO.enumCase(Value, "Never", FormatStyle::SIS_Never);
@@ -227,6 +242,18 @@ struct ScalarEnumerationTraits<FormatStyle::BreakInheritanceListStyle> {
};
template <>
+struct ScalarEnumerationTraits<
+ FormatStyle::EmptyLineBeforeAccessModifierStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::EmptyLineBeforeAccessModifierStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::ELBAMS_Never);
+ IO.enumCase(Value, "Leave", FormatStyle::ELBAMS_Leave);
+ IO.enumCase(Value, "LogicalBlock", FormatStyle::ELBAMS_LogicalBlock);
+ IO.enumCase(Value, "Always", FormatStyle::ELBAMS_Always);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::PPDirectiveIndentStyle> {
static void enumeration(IO &IO, FormatStyle::PPDirectiveIndentStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::PPDIS_None);
@@ -348,6 +375,17 @@ template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::SpaceAroundPointerQualifiersStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::SpaceAroundPointerQualifiersStyle &Value) {
+ IO.enumCase(Value, "Default", FormatStyle::SAPQ_Default);
+ IO.enumCase(Value, "Before", FormatStyle::SAPQ_Before);
+ IO.enumCase(Value, "After", FormatStyle::SAPQ_After);
+ IO.enumCase(Value, "Both", FormatStyle::SAPQ_Both);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
static void enumeration(IO &IO,
FormatStyle::SpaceBeforeParensOptions &Value) {
@@ -366,6 +404,26 @@ struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
}
};
+template <>
+struct ScalarEnumerationTraits<FormatStyle::BitFieldColonSpacingStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::BitFieldColonSpacingStyle &Value) {
+ IO.enumCase(Value, "Both", FormatStyle::BFCS_Both);
+ IO.enumCase(Value, "None", FormatStyle::BFCS_None);
+ IO.enumCase(Value, "Before", FormatStyle::BFCS_Before);
+ IO.enumCase(Value, "After", FormatStyle::BFCS_After);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::SortJavaStaticImportOptions> {
+ static void enumeration(IO &IO,
+ FormatStyle::SortJavaStaticImportOptions &Value) {
+ IO.enumCase(Value, "Before", FormatStyle::SJSIO_Before);
+ IO.enumCase(Value, "After", FormatStyle::SJSIO_After);
+ }
+};
+
template <> struct MappingTraits<FormatStyle> {
static void mapping(IO &IO, FormatStyle &Style) {
// When reading, read the language first, we need it for getPredefinedStyle.
@@ -464,11 +522,14 @@ template <> struct MappingTraits<FormatStyle> {
Style.AlwaysBreakBeforeMultilineStrings);
IO.mapOptional("AlwaysBreakTemplateDeclarations",
Style.AlwaysBreakTemplateDeclarations);
+ IO.mapOptional("AttributeMacros", Style.AttributeMacros);
IO.mapOptional("BinPackArguments", Style.BinPackArguments);
IO.mapOptional("BinPackParameters", Style.BinPackParameters);
IO.mapOptional("BraceWrapping", Style.BraceWrapping);
IO.mapOptional("BreakBeforeBinaryOperators",
Style.BreakBeforeBinaryOperators);
+ IO.mapOptional("BreakBeforeConceptDeclarations",
+ Style.BreakBeforeConceptDeclarations);
IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
bool BreakBeforeInheritanceComma = false;
@@ -511,10 +572,14 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("DeriveLineEnding", Style.DeriveLineEnding);
IO.mapOptional("DerivePointerAlignment", Style.DerivePointerAlignment);
IO.mapOptional("DisableFormat", Style.DisableFormat);
+ IO.mapOptional("EmptyLineBeforeAccessModifier",
+ Style.EmptyLineBeforeAccessModifier);
IO.mapOptional("ExperimentalAutoDetectBinPacking",
Style.ExperimentalAutoDetectBinPacking);
IO.mapOptional("FixNamespaceComments", Style.FixNamespaceComments);
IO.mapOptional("ForEachMacros", Style.ForEachMacros);
+ IO.mapOptional("StatementAttributeLikeMacros",
+ Style.StatementAttributeLikeMacros);
IO.mapOptional("IncludeBlocks", Style.IncludeStyle.IncludeBlocks);
IO.mapOptional("IncludeCategories", Style.IncludeStyle.IncludeCategories);
IO.mapOptional("IncludeIsMainRegex", Style.IncludeStyle.IncludeIsMainRegex);
@@ -525,6 +590,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("IndentGotoLabels", Style.IndentGotoLabels);
IO.mapOptional("IndentPPDirectives", Style.IndentPPDirectives);
IO.mapOptional("IndentExternBlock", Style.IndentExternBlock);
+ IO.mapOptional("IndentRequires", Style.IndentRequires);
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
@@ -558,10 +624,13 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("PenaltyExcessCharacter", Style.PenaltyExcessCharacter);
IO.mapOptional("PenaltyReturnTypeOnItsOwnLine",
Style.PenaltyReturnTypeOnItsOwnLine);
+ IO.mapOptional("PenaltyIndentedWhitespace",
+ Style.PenaltyIndentedWhitespace);
IO.mapOptional("PointerAlignment", Style.PointerAlignment);
IO.mapOptional("RawStringFormats", Style.RawStringFormats);
IO.mapOptional("ReflowComments", Style.ReflowComments);
IO.mapOptional("SortIncludes", Style.SortIncludes);
+ IO.mapOptional("SortJavaStaticImport", Style.SortJavaStaticImport);
IO.mapOptional("SortUsingDeclarations", Style.SortUsingDeclarations);
IO.mapOptional("SpaceAfterCStyleCast", Style.SpaceAfterCStyleCast);
IO.mapOptional("SpaceAfterLogicalNot", Style.SpaceAfterLogicalNot);
@@ -569,6 +638,7 @@ template <> struct MappingTraits<FormatStyle> {
Style.SpaceAfterTemplateKeyword);
IO.mapOptional("SpaceBeforeAssignmentOperators",
Style.SpaceBeforeAssignmentOperators);
+ IO.mapOptional("SpaceBeforeCaseColon", Style.SpaceBeforeCaseColon);
IO.mapOptional("SpaceBeforeCpp11BracedList",
Style.SpaceBeforeCpp11BracedList);
IO.mapOptional("SpaceBeforeCtorInitializerColon",
@@ -576,6 +646,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("SpaceBeforeInheritanceColon",
Style.SpaceBeforeInheritanceColon);
IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
+ IO.mapOptional("SpaceAroundPointerQualifiers",
+ Style.SpaceAroundPointerQualifiers);
IO.mapOptional("SpaceBeforeRangeBasedForLoopColon",
Style.SpaceBeforeRangeBasedForLoopColon);
IO.mapOptional("SpaceInEmptyBlock", Style.SpaceInEmptyBlock);
@@ -593,6 +665,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
IO.mapOptional("SpaceBeforeSquareBrackets",
Style.SpaceBeforeSquareBrackets);
+ IO.mapOptional("BitFieldColonSpacing", Style.BitFieldColonSpacing);
IO.mapOptional("Standard", Style.Standard);
IO.mapOptional("StatementMacros", Style.StatementMacros);
IO.mapOptional("TabWidth", Style.TabWidth);
@@ -757,6 +830,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.BeforeCatch = true;
Expanded.BraceWrapping.BeforeElse = true;
+ Expanded.BraceWrapping.BeforeLambdaBody = true;
break;
case FormatStyle::BS_Whitesmiths:
Expanded.BraceWrapping.AfterCaseLabel = true;
@@ -812,10 +886,10 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignTrailingComments = true;
- LLVMStyle.AlignConsecutiveAssignments = false;
- LLVMStyle.AlignConsecutiveBitFields = false;
- LLVMStyle.AlignConsecutiveDeclarations = false;
- LLVMStyle.AlignConsecutiveMacros = false;
+ LLVMStyle.AlignConsecutiveAssignments = FormatStyle::ACS_None;
+ LLVMStyle.AlignConsecutiveBitFields = FormatStyle::ACS_None;
+ LLVMStyle.AlignConsecutiveDeclarations = FormatStyle::ACS_None;
+ LLVMStyle.AlignConsecutiveMacros = FormatStyle::ACS_None;
LLVMStyle.AllowAllArgumentsOnNextLine = true;
LLVMStyle.AllowAllConstructorInitializersOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
@@ -830,9 +904,11 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
LLVMStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_MultiLine;
+ LLVMStyle.AttributeMacros.push_back("__capability");
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
+ LLVMStyle.BreakBeforeConceptDeclarations = true;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
@@ -867,21 +943,23 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.Cpp11BracedListStyle = true;
LLVMStyle.DeriveLineEnding = true;
LLVMStyle.DerivePointerAlignment = false;
+ LLVMStyle.EmptyLineBeforeAccessModifier = FormatStyle::ELBAMS_LogicalBlock;
LLVMStyle.ExperimentalAutoDetectBinPacking = false;
LLVMStyle.FixNamespaceComments = true;
LLVMStyle.ForEachMacros.push_back("foreach");
LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
LLVMStyle.IncludeStyle.IncludeCategories = {
- {"^\"(llvm|llvm-c|clang|clang-c)/", 2, 0},
- {"^(<|\"(gtest|gmock|isl|json)/)", 3, 0},
- {".*", 1, 0}};
+ {"^\"(llvm|llvm-c|clang|clang-c)/", 2, 0, false},
+ {"^(<|\"(gtest|gmock|isl|json)/)", 3, 0, false},
+ {".*", 1, 0, false}};
LLVMStyle.IncludeStyle.IncludeIsMainRegex = "(Test)?$";
LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentCaseBlocks = false;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
+ LLVMStyle.IndentRequires = false;
LLVMStyle.IndentWrappedFunctionNames = false;
LLVMStyle.IndentWidth = 2;
LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
@@ -911,6 +989,8 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceAfterCStyleCast = false;
LLVMStyle.SpaceAfterLogicalNot = false;
LLVMStyle.SpaceAfterTemplateKeyword = true;
+ LLVMStyle.SpaceAroundPointerQualifiers = FormatStyle::SAPQ_Default;
+ LLVMStyle.SpaceBeforeCaseColon = false;
LLVMStyle.SpaceBeforeCtorInitializerColon = true;
LLVMStyle.SpaceBeforeInheritanceColon = true;
LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
@@ -918,6 +998,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeAssignmentOperators = true;
LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeSquareBrackets = false;
+ LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
LLVMStyle.SpacesInAngles = false;
LLVMStyle.SpacesInConditionalStatement = false;
@@ -929,15 +1010,20 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
LLVMStyle.PenaltyBreakTemplateDeclaration = prec::Relational;
+ LLVMStyle.PenaltyIndentedWhitespace = 0;
LLVMStyle.DisableFormat = false;
LLVMStyle.SortIncludes = true;
+ LLVMStyle.SortJavaStaticImport = FormatStyle::SJSIO_Before;
LLVMStyle.SortUsingDeclarations = true;
+ LLVMStyle.StatementAttributeLikeMacros.push_back("Q_EMIT");
LLVMStyle.StatementMacros.push_back("Q_UNUSED");
LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
LLVMStyle.WhitespaceSensitiveMacros.push_back("STRINGIZE");
LLVMStyle.WhitespaceSensitiveMacros.push_back("PP_STRINGIZE");
LLVMStyle.WhitespaceSensitiveMacros.push_back("BOOST_PP_STRINGIZE");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("NS_SWIFT_NAME");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("CF_SWIFT_NAME");
// Defaults that differ when not C++.
if (Language == FormatStyle::LK_TableGen) {
@@ -966,10 +1052,10 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
GoogleStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
GoogleStyle.DerivePointerAlignment = true;
- GoogleStyle.IncludeStyle.IncludeCategories = {{"^<ext/.*\\.h>", 2, 0},
- {"^<.*\\.h>", 1, 0},
- {"^<.*", 2, 0},
- {".*", 3, 0}};
+ GoogleStyle.IncludeStyle.IncludeCategories = {{"^<ext/.*\\.h>", 2, 0, false},
+ {"^<.*\\.h>", 1, 0, false},
+ {"^<.*", 2, 0, false},
+ {".*", 3, 0, false}};
GoogleStyle.IncludeStyle.IncludeIsMainRegex = "([-_](test|unittest))?$";
GoogleStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
GoogleStyle.IndentCaseLabels = true;
@@ -1275,20 +1361,23 @@ bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
return true;
}
-std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
+std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
+ FormatStyle *Style,
+ bool AllowUnknownOptions) {
assert(Style);
FormatStyle::LanguageKind Language = Style->Language;
assert(Language != FormatStyle::LK_None);
- if (Text.trim().empty())
+ if (Config.getBuffer().trim().empty())
return make_error_code(ParseError::Error);
Style->StyleSet.Clear();
std::vector<FormatStyle> Styles;
- llvm::yaml::Input Input(Text);
+ llvm::yaml::Input Input(Config);
// DocumentListTraits<vector<FormatStyle>> uses the context to get default
// values for the fields, keys for which are missing from the configuration.
// Mapping also uses the context to get the language to find the correct
// base style.
Input.setContext(Style);
+ Input.setAllowUnknownKeys(AllowUnknownOptions);
Input >> Styles;
if (Input.error())
return Input.error();
@@ -1562,9 +1651,9 @@ private:
continue;
FormatToken *Tok = AnnotatedLines[i]->First->Next;
while (Tok->Next) {
- if (Tok->PackingKind == PPK_BinPacked)
+ if (Tok->is(PPK_BinPacked))
HasBinPackedFunction = true;
- if (Tok->PackingKind == PPK_OnePerLine)
+ if (Tok->is(PPK_OnePerLine))
HasOnePerLineFunction = true;
Tok = Tok->Next;
@@ -1980,6 +2069,10 @@ private:
};
for (auto Line : AnnotatedLines) {
+ if (Line->First && (Line->First->TokenText.startswith("#") ||
+ Line->First->TokenText == "__pragma" ||
+ Line->First->TokenText == "_Pragma"))
+ continue;
for (const FormatToken *FormatTok = Line->First; FormatTok;
FormatTok = FormatTok->Next) {
if ((FormatTok->Previous && FormatTok->Previous->is(tok::at) &&
@@ -2132,7 +2225,8 @@ static void sortCppIncludes(const FormatStyle &Style,
// Deduplicate #includes.
Indices.erase(std::unique(Indices.begin(), Indices.end(),
[&](unsigned LHSI, unsigned RHSI) {
- return Includes[LHSI].Text == Includes[RHSI].Text;
+ return Includes[LHSI].Text.trim() ==
+ Includes[RHSI].Text.trim();
}),
Indices.end());
@@ -2191,7 +2285,9 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
StringRef FileName,
tooling::Replacements &Replaces,
unsigned *Cursor) {
- unsigned Prev = 0;
+ unsigned Prev = llvm::StringSwitch<size_t>(Code)
+ .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
+ .Default(0);
unsigned SearchFrom = 0;
llvm::Regex IncludeRegex(CppIncludeRegexPattern);
SmallVector<StringRef, 4> Matches;
@@ -2227,7 +2323,8 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
Style.IncludeStyle.IncludeBlocks ==
tooling::IncludeStyle::IBS_Regroup);
- if (!FormattingOff && !Line.endswith("\\")) {
+ bool MergeWithNextLine = Trimmed.endswith("\\");
+ if (!FormattingOff && !MergeWithNextLine) {
if (IncludeRegex.match(Line, &Matches)) {
StringRef IncludeName = Matches[2];
int Category = Categories.getIncludePriority(
@@ -2243,12 +2340,17 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
sortCppIncludes(Style, IncludesInBlock, Ranges, FileName, Code,
Replaces, Cursor);
IncludesInBlock.clear();
- FirstIncludeBlock = false;
+ if (Trimmed.startswith("#pragma hdrstop")) // Precompiled headers.
+ FirstIncludeBlock = true;
+ else
+ FirstIncludeBlock = false;
}
- Prev = Pos + 1;
}
if (Pos == StringRef::npos || Pos + 1 == Code.size())
break;
+
+ if (!MergeWithNextLine)
+ Prev = Pos + 1;
SearchFrom = Pos + 1;
}
if (!IncludesInBlock.empty()) {
@@ -2297,12 +2399,16 @@ static void sortJavaImports(const FormatStyle &Style,
JavaImportGroups.push_back(
findJavaImportGroup(Style, Imports[i].Identifier));
}
+ bool StaticImportAfterNormalImport =
+ Style.SortJavaStaticImport == FormatStyle::SJSIO_After;
llvm::sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
// Negating IsStatic (optionally flipped via StaticImportAfterNormalImport)
// groups static imports before or after non-static imports.
- return std::make_tuple(!Imports[LHSI].IsStatic, JavaImportGroups[LHSI],
- Imports[LHSI].Identifier) <
- std::make_tuple(!Imports[RHSI].IsStatic, JavaImportGroups[RHSI],
- Imports[RHSI].Identifier);
+ return std::make_tuple(!Imports[LHSI].IsStatic ^
+ StaticImportAfterNormalImport,
+ JavaImportGroups[LHSI], Imports[LHSI].Identifier) <
+ std::make_tuple(!Imports[RHSI].IsStatic ^
+ StaticImportAfterNormalImport,
+ JavaImportGroups[RHSI], Imports[RHSI].Identifier);
});
// Deduplicate imports.
@@ -2726,6 +2832,7 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOpts.ObjC = 1;
LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
LangOpts.DeclSpecKeyword = 1; // To get __declspec.
+ LangOpts.C99 = 1; // To get kw_restrict for non-underscore-prefixed restrict.
return LangOpts;
}
@@ -2787,8 +2894,8 @@ const char *DefaultFallbackStyle = "LLVM";
llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef FallbackStyleName,
- StringRef Code,
- llvm::vfs::FileSystem *FS) {
+ StringRef Code, llvm::vfs::FileSystem *FS,
+ bool AllowUnknownOptions) {
if (!FS) {
FS = llvm::vfs::getRealFileSystem().get();
}
@@ -2800,7 +2907,9 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
if (StyleName.startswith("{")) {
// Parse YAML/JSON style from the command line.
- if (std::error_code ec = parseConfiguration(StyleName, &Style))
+ if (std::error_code ec = parseConfiguration(
+ llvm::MemoryBufferRef(StyleName, "<command-line>"), &Style,
+ AllowUnknownOptions))
return make_string_error("Error parsing -style: " + ec.message());
return Style;
}
@@ -2845,7 +2954,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
if (std::error_code EC = Text.getError())
return make_string_error(EC.message());
if (std::error_code ec =
- parseConfiguration(Text.get()->getBuffer(), &Style)) {
+ parseConfiguration(*Text.get(), &Style, AllowUnknownOptions)) {
if (ec == ParseError::Unsuitable) {
if (!UnsuitableConfigFiles.empty())
UnsuitableConfigFiles.append(", ");
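With the signature change above, callers hand parseConfiguration a named buffer (so YAML diagnostics can cite it) plus an AllowUnknownOptions escape hatch. A usage sketch; the buffer name "<test>" and the inline YAML are arbitrary:

#include "clang/Format/Format.h"
#include "llvm/Support/MemoryBuffer.h"
#include <system_error>

static std::error_code parseSampleConfig() {
  clang::format::FormatStyle Style = clang::format::getLLVMStyle();
  return clang::format::parseConfiguration(
      llvm::MemoryBufferRef("BasedOnStyle: LLVM\nIndentWidth: 4", "<test>"),
      &Style, /*AllowUnknownOptions=*/true);
}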
diff --git a/clang/lib/Format/FormatInternal.h b/clang/lib/Format/FormatInternal.h
index 3aa616da23d8..9043ce32e9e3 100644
--- a/clang/lib/Format/FormatInternal.h
+++ b/clang/lib/Format/FormatInternal.h
@@ -16,7 +16,6 @@
#define LLVM_CLANG_LIB_FORMAT_FORMATINTERNAL_H
#include "BreakableToken.h"
-#include "clang/Tooling/Core/Lookup.h"
#include <utility>
namespace clang {
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index 7d792974cd57..8e4994f4c0d5 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -62,6 +62,7 @@ bool FormatToken::isSimpleTypeSpecifier() const {
case tok::kw_char32_t:
case tok::kw_typeof:
case tok::kw_decltype:
+ case tok::kw__Atomic:
return true;
default:
return false;
@@ -85,8 +86,8 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
const FormatToken *LBrace =
State.NextToken->Previous->getPreviousNonComment();
if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
- LBrace->BlockKind == BK_Block || LBrace->getType() == TT_DictLiteral ||
- LBrace->Next->getType() == TT_DesignatedInitializerPeriod)
+ LBrace->is(BK_Block) || LBrace->is(TT_DictLiteral) ||
+ LBrace->Next->is(TT_DesignatedInitializerPeriod))
return 0;
// Calculate the number of code points we have to format this list. As the
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index d4287f53fde3..2f53b338379d 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -29,6 +29,7 @@ namespace format {
TYPE(ArrayInitializerLSquare) \
TYPE(ArraySubscriptLSquare) \
TYPE(AttributeColon) \
+ TYPE(AttributeMacro) \
TYPE(AttributeParen) \
TYPE(AttributeSquare) \
TYPE(BinaryOperator) \
@@ -39,6 +40,7 @@ namespace format {
TYPE(ConflictAlternative) \
TYPE(ConflictEnd) \
TYPE(ConflictStart) \
+ TYPE(ConstraintJunctions) \
TYPE(CtorInitializerColon) \
TYPE(CtorInitializerComma) \
TYPE(DesignatedInitializerLSquare) \
@@ -67,6 +69,9 @@ namespace format {
TYPE(JsTypeColon) \
TYPE(JsTypeOperator) \
TYPE(JsTypeOptionalQuestion) \
+ TYPE(JsAndAndEqual) \
+ TYPE(JsPipePipeEqual) \
+ TYPE(JsNullishCoalescingEqual) \
TYPE(LambdaArrow) \
TYPE(LambdaLBrace) \
TYPE(LambdaLSquare) \
@@ -91,6 +96,7 @@ namespace format {
TYPE(RegexLiteral) \
TYPE(SelectorName) \
TYPE(StartOfName) \
+ TYPE(StatementAttributeLikeMacro) \
TYPE(StatementMacro) \
TYPE(StructuredBindingLSquare) \
TYPE(TemplateCloser) \
@@ -100,6 +106,7 @@ namespace format {
TYPE(TrailingAnnotation) \
TYPE(TrailingReturnArrow) \
TYPE(TrailingUnaryOperator) \
+ TYPE(TypeDeclarationParen) \
TYPE(TypenameMacro) \
TYPE(UnaryOperator) \
TYPE(UntouchableMacroFunc) \
@@ -116,7 +123,7 @@ namespace format {
/// Determines the semantic type of a syntactic token, e.g. whether "<" is a
/// template opener or binary operator.
-enum TokenType {
+enum TokenType : uint8_t {
#define TYPE(X) TT_##X,
LIST_TOKEN_TYPES
#undef TYPE
@@ -134,86 +141,216 @@ enum ParameterPackingKind { PPK_BinPacked, PPK_OnePerLine, PPK_Inconclusive };
enum FormatDecision { FD_Unformatted, FD_Continue, FD_Break };
+/// Roles a token can take in a configured macro expansion.
+enum MacroRole {
+ /// The token was expanded from a macro argument when formatting the expanded
+ /// token sequence.
+ MR_ExpandedArg,
+ /// The token is part of a macro argument that was previously formatted as
+ /// expansion when formatting the unexpanded macro call.
+ MR_UnexpandedArg,
+ /// The token was expanded from a macro definition, and is not visible as part
+ /// of the macro call.
+ MR_Hidden,
+};
+
+struct FormatToken;
+
+/// Contains information on the token's role in a macro expansion.
+///
+/// Given the following definitions:
+/// A(X) = [ X ]
+/// B(X) = < X >
+/// C(X) = X
+///
+/// Consider the macro call:
+/// A({B(C(C(x)))}) -> [{<x>}]
+///
+/// In this case, the tokens of the unexpanded macro call will have the
+/// following relevant entries in their macro context (note that formatting
+/// the unexpanded macro call happens *after* formatting the expanded macro
+/// call):
+/// A( { B( C( C(x) ) ) } )
+/// Role: NN U NN NN NNUN N N U N (N=None, U=UnexpandedArg)
+///
+/// [ { < x > } ]
+/// Role: H E H E H E H (H=Hidden, E=ExpandedArg)
+/// ExpandedFrom[0]: A A A A A A A
+/// ExpandedFrom[1]: B B B
+/// ExpandedFrom[2]: C
+/// ExpandedFrom[3]: C
+/// StartOfExpansion: 1 0 1 2 0 0 0
+/// EndOfExpansion: 0 0 0 2 1 0 1
+struct MacroExpansion {
+ MacroExpansion(MacroRole Role) : Role(Role) {}
+
+ /// The token's role in the macro expansion.
+ /// When formatting an expanded macro, all tokens that are part of macro
+ /// arguments will be MR_ExpandedArg, while all tokens that are not visible in
+ /// the macro call will be MR_Hidden.
+ /// When formatting an unexpanded macro call, all tokens that are part of
+ /// macro arguments will be MR_UnexpandedArg.
+ MacroRole Role;
+
+ /// The stack of macro call identifier tokens this token was expanded from.
+ llvm::SmallVector<FormatToken *, 1> ExpandedFrom;
+
+ /// The number of expansions of which this macro is the first entry.
+ unsigned StartOfExpansion = 0;
+
+ /// The number of currently open expansions in \c ExpandedFrom this macro is
+ /// the last token in.
+ unsigned EndOfExpansion = 0;
+};
+
class TokenRole;
class AnnotatedLine;
/// A wrapper around a \c Token storing information about the
/// whitespace characters preceding it.
struct FormatToken {
- FormatToken() {}
+ FormatToken()
+ : HasUnescapedNewline(false), IsMultiline(false), IsFirst(false),
+ MustBreakBefore(false), IsUnterminatedLiteral(false),
+ CanBreakBefore(false), ClosesTemplateDeclaration(false),
+ StartsBinaryExpression(false), EndsBinaryExpression(false),
+ PartOfMultiVariableDeclStmt(false), ContinuesLineCommentSection(false),
+ Finalized(false), BlockKind(BK_Unknown), Decision(FD_Unformatted),
+ PackingKind(PPK_Inconclusive), Type(TT_Unknown) {}
/// The \c Token.
Token Tok;
- /// The number of newlines immediately before the \c Token.
+ /// The raw text of the token.
///
- /// This can be used to determine what the user wrote in the original code
- /// and thereby e.g. leave an empty line between two function definitions.
- unsigned NewlinesBefore = 0;
+ /// Contains the raw token text without leading whitespace and without leading
+ /// escaped newlines.
+ StringRef TokenText;
- /// Whether there is at least one unescaped newline before the \c
- /// Token.
- bool HasUnescapedNewline = false;
+ /// A token can have a special role that can carry extra information
+ /// about the token's formatting.
+ /// FIXME: Make FormatToken for parsing and AnnotatedToken two different
+ /// classes and make this a unique_ptr in the AnnotatedToken class.
+ std::shared_ptr<TokenRole> Role;
/// The range of the whitespace immediately preceding the \c Token.
SourceRange WhitespaceRange;
- /// The offset just past the last '\n' in this token's leading
- /// whitespace (relative to \c WhiteSpaceStart). 0 if there is no '\n'.
- unsigned LastNewlineOffset = 0;
-
- /// The width of the non-whitespace parts of the token (or its first
- /// line for multi-line tokens) in columns.
- /// We need this to correctly measure number of columns a token spans.
- unsigned ColumnWidth = 0;
-
- /// Contains the width in columns of the last line of a multi-line
- /// token.
- unsigned LastLineColumnWidth = 0;
+ /// Whether there is at least one unescaped newline before the \c
+ /// Token.
+ unsigned HasUnescapedNewline : 1;
/// Whether the token text contains newlines (escaped or not).
- bool IsMultiline = false;
+ unsigned IsMultiline : 1;
/// Indicates that this is the first token of the file.
- bool IsFirst = false;
+ unsigned IsFirst : 1;
/// Whether there must be a line break before this token.
///
/// This happens for example when a preprocessor directive ended directly
/// before the token.
- bool MustBreakBefore = false;
+ unsigned MustBreakBefore : 1;
+
+ /// Set to \c true if this token is an unterminated literal.
+ unsigned IsUnterminatedLiteral : 1;
+
+ /// \c true if it is allowed to break before this token.
+ unsigned CanBreakBefore : 1;
+
+ /// \c true if this is the ">" of "template<..>".
+ unsigned ClosesTemplateDeclaration : 1;
- /// Whether to not align across this token
+ /// \c true if this token starts a binary expression, i.e. has at least
+ /// one fake l_paren with a precedence greater than prec::Unknown.
+ unsigned StartsBinaryExpression : 1;
+ /// \c true if this token ends a binary expression.
+ unsigned EndsBinaryExpression : 1;
+
+ /// Is this token part of a \c DeclStmt defining multiple variables?
///
- /// This happens for example when a preprocessor directive ended directly
- /// before the token, but very rarely otherwise.
- bool MustBreakAlignBefore = false;
+ /// Only set if \c Type == \c TT_StartOfName.
+ unsigned PartOfMultiVariableDeclStmt : 1;
- /// The raw text of the token.
+ /// Does this line comment continue a line comment section?
///
- /// Contains the raw token text without leading whitespace and without leading
- /// escaped newlines.
- StringRef TokenText;
+ /// Only set to true if \c Type == \c TT_LineComment.
+ unsigned ContinuesLineCommentSection : 1;
- /// Set to \c true if this token is an unterminated literal.
- bool IsUnterminatedLiteral = 0;
+ /// If \c true, this token has been fully formatted (indented and
+ /// potentially re-formatted inside), and we do not allow further formatting
+ /// changes.
+ unsigned Finalized : 1;
+private:
/// Contains the kind of block if this token is a brace.
- BraceBlockKind BlockKind = BK_Unknown;
+ unsigned BlockKind : 2;
+
+public:
+ BraceBlockKind getBlockKind() const {
+ return static_cast<BraceBlockKind>(BlockKind);
+ }
+ void setBlockKind(BraceBlockKind BBK) {
+ BlockKind = BBK;
+ assert(getBlockKind() == BBK && "BraceBlockKind overflow!");
+ }
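+ // Note: the assert guards the 2-bit field; a BraceBlockKind value that
+ // does not round-trip through getBlockKind() would overflow it.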
+
+private:
+ /// Stores the formatting decision for the token once it was made.
+ unsigned Decision : 2;
+
+public:
+ FormatDecision getDecision() const {
+ return static_cast<FormatDecision>(Decision);
+ }
+ void setDecision(FormatDecision D) {
+ Decision = D;
+ assert(getDecision() == D && "FormatDecision overflow!");
+ }
+private:
+ /// If this is an opening parenthesis, how are the parameters packed?
+ unsigned PackingKind : 2;
+
+public:
+ ParameterPackingKind getPackingKind() const {
+ return static_cast<ParameterPackingKind>(PackingKind);
+ }
+ void setPackingKind(ParameterPackingKind K) {
+ PackingKind = K;
+ assert(getPackingKind() == K && "ParameterPackingKind overflow!");
+ }
+
+private:
+ TokenType Type;
+
+public:
/// Returns the token's type, e.g. whether "<" is a template opener or
/// binary operator.
TokenType getType() const { return Type; }
void setType(TokenType T) { Type = T; }
- /// The number of spaces that should be inserted before this token.
- unsigned SpacesRequiredBefore = 0;
+ /// The number of newlines immediately before the \c Token.
+ ///
+ /// This can be used to determine what the user wrote in the original code
+ /// and thereby e.g. leave an empty line between two function definitions.
+ unsigned NewlinesBefore = 0;
- /// \c true if it is allowed to break before this token.
- bool CanBreakBefore = false;
+ /// The offset just past the last '\n' in this token's leading
+ /// whitespace (relative to \c WhiteSpaceStart). 0 if there is no '\n'.
+ unsigned LastNewlineOffset = 0;
- /// \c true if this is the ">" of "template<..>".
- bool ClosesTemplateDeclaration = false;
+ /// The width of the non-whitespace parts of the token (or its first
+ /// line for multi-line tokens) in columns.
+ /// We need this to correctly measure number of columns a token spans.
+ unsigned ColumnWidth = 0;
+
+ /// Contains the width in columns of the last line of a multi-line
+ /// token.
+ unsigned LastLineColumnWidth = 0;
+
+ /// The number of spaces that should be inserted before this token.
+ unsigned SpacesRequiredBefore = 0;
/// Number of parameters, if this is "(", "[" or "<".
unsigned ParameterCount = 0;
@@ -226,13 +363,6 @@ struct FormatToken {
/// the surrounding bracket.
tok::TokenKind ParentBracket = tok::unknown;
- /// A token can have a special role that can carry extra information
- /// about the token's formatting.
- std::unique_ptr<TokenRole> Role;
-
- /// If this is an opening parenthesis, how are the parameters packed?
- ParameterPackingKind PackingKind = PPK_Inconclusive;
-
/// The total length of the unwrapped line up to and including this
/// token.
unsigned TotalLength = 0;
@@ -286,12 +416,6 @@ struct FormatToken {
/// Insert this many fake ) after this token for correct indentation.
unsigned FakeRParens = 0;
- /// \c true if this token starts a binary expression, i.e. has at least
- /// one fake l_paren with a precedence greater than prec::Unknown.
- bool StartsBinaryExpression = false;
- /// \c true if this token ends a binary expression.
- bool EndsBinaryExpression = false;
-
/// If this is an operator (or "."/"->") in a sequence of operators
/// with the same precedence, contains the 0-based operator index.
unsigned OperatorIndex = 0;
@@ -300,16 +424,6 @@ struct FormatToken {
/// with the same precedence, points to the next operator.
FormatToken *NextOperator = nullptr;
- /// Is this token part of a \c DeclStmt defining multiple variables?
- ///
- /// Only set if \c Type == \c TT_StartOfName.
- bool PartOfMultiVariableDeclStmt = false;
-
- /// Does this line comment continue a line comment section?
- ///
- /// Only set to true if \c Type == \c TT_LineComment.
- bool ContinuesLineCommentSection = false;
-
/// If this is a bracket, this points to the matching one.
FormatToken *MatchingParen = nullptr;
@@ -323,16 +437,12 @@ struct FormatToken {
/// in it.
SmallVector<AnnotatedLine *, 1> Children;
- /// Stores the formatting decision for the token once it was made.
- FormatDecision Decision = FD_Unformatted;
-
- /// If \c true, this token has been fully formatted (indented and
- /// potentially re-formatted inside), and we do not allow further formatting
- /// changes.
- bool Finalized = false;
+ // Contains all attributes related to how this token takes part
+ // in a configured macro expansion.
+ llvm::Optional<MacroExpansion> MacroCtx;
bool is(tok::TokenKind Kind) const { return Tok.is(Kind); }
- bool is(TokenType TT) const { return Type == TT; }
+ bool is(TokenType TT) const { return getType() == TT; }
bool is(const IdentifierInfo *II) const {
return II && II == Tok.getIdentifierInfo();
}
@@ -340,6 +450,9 @@ struct FormatToken {
return Tok.getIdentifierInfo() &&
Tok.getIdentifierInfo()->getPPKeywordID() == Kind;
}
+ bool is(BraceBlockKind BBK) const { return getBlockKind() == BBK; }
+ bool is(ParameterPackingKind PPK) const { return getPackingKind() == PPK; }
+
template <typename A, typename B> bool isOneOf(A K1, B K2) const {
return is(K1) || is(K2);
}
@@ -355,7 +468,7 @@ struct FormatToken {
}
bool closesScopeAfterBlock() const {
- if (BlockKind == BK_Block)
+ if (getBlockKind() == BK_Block)
return true;
if (closesScope())
return Previous->closesScopeAfterBlock();
@@ -391,6 +504,13 @@ struct FormatToken {
(!ColonRequired || (Next && Next->is(tok::colon)));
}
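+
+ /// Whether this token can act as a qualifier between a pointer's '*' or
+ /// '&' and the declared name, e.g. the "const" in "int *const p".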
+ bool canBePointerOrReferenceQualifier() const {
+ return isOneOf(tok::kw_const, tok::kw_restrict, tok::kw_volatile,
+ tok::kw___attribute, tok::kw__Nonnull, tok::kw__Nullable,
+ tok::kw__Null_unspecified, tok::kw___ptr32, tok::kw___ptr64,
+ TT_AttributeMacro);
+ }
+
/// Determine whether the token is a simple-type-specifier.
bool isSimpleTypeSpecifier() const;
@@ -469,7 +589,10 @@ struct FormatToken {
case tok::kw_decltype:
case tok::kw_noexcept:
case tok::kw_static_assert:
+ case tok::kw__Atomic:
case tok::kw___attribute:
+ case tok::kw___underlying_type:
+ case tok::kw_requires:
return true;
default:
return false;
@@ -525,13 +648,13 @@ struct FormatToken {
/// list that should be indented with a block indent.
bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
// C# does not indent object initialisers as continuations.
- if (is(tok::l_brace) && BlockKind == BK_BracedInit && Style.isCSharp())
+ if (is(tok::l_brace) && getBlockKind() == BK_BracedInit && Style.isCSharp())
return true;
if (is(TT_TemplateString) && opensScope())
return true;
return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
(is(tok::l_brace) &&
- (BlockKind == BK_Block || is(TT_DictLiteral) ||
+ (getBlockKind() == BK_Block || is(TT_DictLiteral) ||
(!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
(is(tok::less) && (Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto));
@@ -572,10 +695,12 @@ struct FormatToken {
: nullptr;
}
+ void copyFrom(const FormatToken &Tok) { *this = Tok; }
+
private:
- // Disallow copying.
+ // Only allow copying via the explicit copyFrom method.
FormatToken(const FormatToken &) = delete;
- void operator=(const FormatToken &) = delete;
+ FormatToken &operator=(const FormatToken &) = default;
template <typename A, typename... Ts>
bool startsSequenceInternal(A K1, Ts... Tokens) const {
@@ -602,8 +727,6 @@ private:
return Previous->endsSequenceInternal(K1, Tokens...);
return is(K1) && Previous && Previous->endsSequenceInternal(Tokens...);
}
-
- TokenType Type = TT_Unknown;
};
class ContinuationIndenter;
diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp
index 1fd153d1112e..e9b096370dbb 100644
--- a/clang/lib/Format/FormatTokenLexer.cpp
+++ b/clang/lib/Format/FormatTokenLexer.cpp
@@ -33,12 +33,14 @@ FormatTokenLexer::FormatTokenLexer(
Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
MacroBlockEndRegex(Style.MacroBlockEnd) {
- Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
+ Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr,
getFormattingLangOpts(Style)));
Lex->SetKeepWhitespaceMode(true);
for (const std::string &ForEachMacro : Style.ForEachMacros)
Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
+ for (const std::string &AttributeMacro : Style.AttributeMacros)
+ Macros.insert({&IdentTable.get(AttributeMacro), TT_AttributeMacro});
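+ // e.g. a style whose AttributeMacros contains "MY_ATTR" (hypothetical
+ // name) makes the lexer annotate that identifier as TT_AttributeMacro.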
for (const std::string &StatementMacro : Style.StatementMacros)
Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
for (const std::string &TypenameMacro : Style.TypenameMacros)
@@ -50,6 +52,10 @@ FormatTokenLexer::FormatTokenLexer(
Macros.insert(
{&IdentTable.get(WhitespaceSensitiveMacro), TT_UntouchableMacroFunc});
}
+ for (const std::string &StatementAttributeLikeMacro :
+ Style.StatementAttributeLikeMacros)
+ Macros.insert({&IdentTable.get(StatementAttributeLikeMacro),
+ TT_StatementAttributeLikeMacro});
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -119,6 +125,10 @@ void FormatTokenLexer::tryMergePreviousTokens() {
tok::period};
static const tok::TokenKind JSNullishOperator[] = {tok::question,
tok::question};
+ static const tok::TokenKind JSNullishEqual[] = {tok::question,
+ tok::question, tok::equal};
+ static const tok::TokenKind JSPipePipeEqual[] = {tok::pipepipe, tok::equal};
+ static const tok::TokenKind JSAndAndEqual[] = {tok::ampamp, tok::equal};
// FIXME: Investigate what token type gives the correct operator priority.
if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
@@ -146,6 +156,13 @@ void FormatTokenLexer::tryMergePreviousTokens() {
Tokens.back()->Tok.setKind(tok::period);
return;
}
+ if (tryMergeTokens(JSAndAndEqual, TT_JsAndAndEqual) ||
+ tryMergeTokens(JSPipePipeEqual, TT_JsPipePipeEqual) ||
+ tryMergeTokens(JSNullishEqual, TT_JsNullishCoalescingEqual)) {
+ // Treat like the "=" assignment operator.
+ Tokens.back()->Tok.setKind(tok::equal);
+ return;
+ }
if (tryMergeJSPrivateIdentifier())
return;
}
@@ -399,7 +416,7 @@ bool FormatTokenLexer::tryTransformTryUsageForC() {
if (!Try->is(tok::kw_try))
return false;
auto &Next = *(Tokens.end() - 1);
- if (Next->isOneOf(tok::l_brace, tok::colon))
+ if (Next->isOneOf(tok::l_brace, tok::colon, tok::hash, tok::comment))
return false;
if (Tokens.size() > 2) {
@@ -761,7 +778,7 @@ bool FormatTokenLexer::tryMergeConflictMarkers() {
unsigned FirstInLineOffset;
std::tie(ID, FirstInLineOffset) = SourceMgr.getDecomposedLoc(
Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
- StringRef Buffer = SourceMgr.getBuffer(ID)->getBuffer();
+ StringRef Buffer = SourceMgr.getBufferOrFake(ID).getBuffer();
// Calculate the offset of the start of the current line.
auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
if (LineOffset == StringRef::npos) {
diff --git a/clang/lib/Format/MacroExpander.cpp b/clang/lib/Format/MacroExpander.cpp
new file mode 100644
index 000000000000..e50c80446963
--- /dev/null
+++ b/clang/lib/Format/MacroExpander.cpp
@@ -0,0 +1,224 @@
+//===--- MacroExpander.cpp - Format C++ code --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the implementation of MacroExpander, which handles macro
+/// configuration and expansion while formatting.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Macros.h"
+
+#include "Encoding.h"
+#include "FormatToken.h"
+#include "FormatTokenLexer.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Format/Format.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+namespace format {
+
+struct MacroExpander::Definition {
+ StringRef Name;
+ SmallVector<FormatToken *, 8> Params;
+ SmallVector<FormatToken *, 8> Body;
+
+ // Map from each argument's name to its position in the argument list.
+ // With "M(x, y) x + y":
+ // x -> 0
+ // y -> 1
+ llvm::StringMap<size_t> ArgMap;
+
+ bool ObjectLike = true;
+};
+
+class MacroExpander::DefinitionParser {
+public:
+ DefinitionParser(ArrayRef<FormatToken *> Tokens) : Tokens(Tokens) {
+ assert(!Tokens.empty());
+ Current = Tokens[0];
+ }
+
+ // Parse the token stream and return the corresponding Definition object.
+ // Returns an empty definition object with a null Name on error.
+ MacroExpander::Definition parse() {
+ if (!Current->is(tok::identifier))
+ return {};
+ Def.Name = Current->TokenText;
+ nextToken();
+ if (Current->is(tok::l_paren)) {
+ Def.ObjectLike = false;
+ if (!parseParams())
+ return {};
+ }
+ if (!parseExpansion())
+ return {};
+
+ return Def;
+ }
+
+private:
+ bool parseParams() {
+ assert(Current->is(tok::l_paren));
+ nextToken();
+ while (Current->is(tok::identifier)) {
+ Def.Params.push_back(Current);
+ Def.ArgMap[Def.Params.back()->TokenText] = Def.Params.size() - 1;
+ nextToken();
+ if (Current->isNot(tok::comma))
+ break;
+ nextToken();
+ }
+ if (Current->isNot(tok::r_paren))
+ return false;
+ nextToken();
+ return true;
+ }
+
+ bool parseExpansion() {
+ if (!Current->isOneOf(tok::equal, tok::eof))
+ return false;
+ if (Current->is(tok::equal))
+ nextToken();
+ parseTail();
+ return true;
+ }
+
+ void parseTail() {
+ while (Current->isNot(tok::eof)) {
+ Def.Body.push_back(Current);
+ nextToken();
+ }
+ Def.Body.push_back(Current);
+ }
+
+ void nextToken() {
+ if (Pos + 1 < Tokens.size())
+ ++Pos;
+ Current = Tokens[Pos];
+ Current->Finalized = true;
+ }
+
+ size_t Pos = 0;
+ FormatToken *Current = nullptr;
+ Definition Def;
+ ArrayRef<FormatToken *> Tokens;
+};
+
+MacroExpander::MacroExpander(
+ const std::vector<std::string> &Macros, clang::SourceManager &SourceMgr,
+ const FormatStyle &Style,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable)
+ : SourceMgr(SourceMgr), Style(Style), Allocator(Allocator),
+ IdentTable(IdentTable) {
+ for (const std::string &Macro : Macros) {
+ parseDefinition(Macro);
+ }
+}
+
+MacroExpander::~MacroExpander() = default;
+
+void MacroExpander::parseDefinition(const std::string &Macro) {
+ Buffers.push_back(
+ llvm::MemoryBuffer::getMemBufferCopy(Macro, "<scratch space>"));
+ clang::FileID FID = SourceMgr.createFileID(Buffers.back()->getMemBufferRef());
+ FormatTokenLexer Lex(SourceMgr, FID, 0, Style, encoding::Encoding_UTF8,
+ Allocator, IdentTable);
+ const auto Tokens = Lex.lex();
+ if (!Tokens.empty()) {
+ DefinitionParser Parser(Tokens);
+ auto Definition = Parser.parse();
+ Definitions[Definition.Name] = std::move(Definition);
+ }
+}
+
+bool MacroExpander::defined(llvm::StringRef Name) const {
+ return Definitions.find(Name) != Definitions.end();
+}
+
+bool MacroExpander::objectLike(llvm::StringRef Name) const {
+ return Definitions.find(Name)->second.ObjectLike;
+}
+
+llvm::SmallVector<FormatToken *, 8> MacroExpander::expand(FormatToken *ID,
+ ArgsList Args) const {
+ assert(defined(ID->TokenText));
+ SmallVector<FormatToken *, 8> Result;
+ const Definition &Def = Definitions.find(ID->TokenText)->second;
+
+ // Expand each argument at most once.
+ llvm::StringSet<> ExpandedArgs;
+
+ // Adds the given token to Result.
+ auto pushToken = [&](FormatToken *Tok) {
+ Tok->MacroCtx->ExpandedFrom.push_back(ID);
+ Result.push_back(Tok);
+ };
+
+ // If Tok references a parameter, adds the corresponding argument to Result.
+ // Returns false if Tok does not reference a parameter.
+ auto expandArgument = [&](FormatToken *Tok) -> bool {
+ // If the current token references a parameter, expand the corresponding
+ // argument.
+ if (!Tok->is(tok::identifier) || ExpandedArgs.contains(Tok->TokenText))
+ return false;
+ ExpandedArgs.insert(Tok->TokenText);
+ auto I = Def.ArgMap.find(Tok->TokenText);
+ if (I == Def.ArgMap.end())
+ return false;
+ // If there are fewer arguments than referenced parameters, treat the
+ // parameter as empty.
+ // FIXME: Potentially fully abort the expansion instead.
+ if (I->getValue() >= Args.size())
+ return true;
+ for (FormatToken *Arg : Args[I->getValue()]) {
+ // A token can be part of a macro argument at multiple levels.
+ // For example, with "ID(x) x":
+ // in ID(ID(x)), 'x' is expanded first as argument to the inner
+ // ID, then again as argument to the outer ID. We keep the macro
+ // role the token had from the inner expansion.
+ if (!Arg->MacroCtx)
+ Arg->MacroCtx = MacroExpansion(MR_ExpandedArg);
+ pushToken(Arg);
+ }
+ return true;
+ };
+
+ // Expand the definition into Result.
+ for (FormatToken *Tok : Def.Body) {
+ if (expandArgument(Tok))
+ continue;
+ // Create a copy of the tokens from the macro body, i.e. tokens that were
+ // not provided by user code.
+ FormatToken *New = new (Allocator.Allocate()) FormatToken;
+ New->copyFrom(*Tok);
+ assert(!New->MacroCtx);
+ // Tokens that are not part of the user code are not formatted.
+ New->MacroCtx = MacroExpansion(MR_Hidden);
+ pushToken(New);
+ }
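+ // Mark the expansion boundaries: the first token starts an expansion of
+ // ID and the last token before the trailing eof ends it, provided the
+ // expansion produced more than the eof alone.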
+ assert(Result.size() >= 1 && Result.back()->is(tok::eof));
+ if (Result.size() > 1) {
+ ++Result[0]->MacroCtx->StartOfExpansion;
+ ++Result[Result.size() - 2]->MacroCtx->EndOfExpansion;
+ }
+ return Result;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/clang/lib/Format/Macros.h b/clang/lib/Format/Macros.h
new file mode 100644
index 000000000000..591ef8b5be3c
--- /dev/null
+++ b/clang/lib/Format/Macros.h
@@ -0,0 +1,141 @@
+//===--- MacroExpander.h - Format C++ code ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the main building blocks of macro support in
+/// clang-format.
+///
+/// In order to not violate the requirement that clang-format can format files
+/// in isolation, clang-format's macro support uses expansions users provide
+/// as part of clang-format's style configuration.
+///
+/// Macro definitions are of the form "MACRO(p1, p2)=p1 + p2", but only support
+/// one level of expansion (\see MacroExpander for a full description of what
+/// is supported).
+///
+/// As part of parsing, clang-format uses the MacroExpander to expand the
+/// spelled token streams into expanded token streams when it encounters a
+/// macro call. The UnwrappedLineParser continues to parse UnwrappedLines
+/// from the expanded token stream.
+/// After the expanded unwrapped lines are parsed, the MacroUnexpander matches
+/// the spelled token stream into unwrapped lines that best resemble the
+/// structure of the expanded unwrapped lines.
+///
+/// When formatting, clang-format formats the expanded unwrapped lines first,
+/// determining the token types. Next, it formats the spelled unwrapped lines,
+/// keeping the token types fixed, while allowing other formatting decisions
+/// to change.
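+///
+/// For illustration, assuming the configured macro "A(x, y)=x + y" and the
+/// source line "A(int a = 1, 2);": the expanded unwrapped line
+/// "int a = 1 + 2;" is parsed and formatted first, fixing the token types;
+/// the spelled call is then formatted with those types kept.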
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_FORMAT_MACROS_H
+#define CLANG_LIB_FORMAT_MACROS_H
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "Encoding.h"
+#include "FormatToken.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class MemoryBuffer;
+} // namespace llvm
+
+namespace clang {
+class IdentifierTable;
+class SourceManager;
+
+namespace format {
+struct FormatStyle;
+
+/// Takes a set of macro definitions as strings and allows expanding calls to
+/// those macros.
+///
+/// For example:
+/// Definition: A(x, y)=x + y
+/// Call : A(int a = 1, 2)
+/// Expansion : int a = 1 + 2
+///
+/// Expansion does not check arity of the definition.
+/// If fewer arguments than expected are provided, the remaining parameters
+/// are considered empty:
+/// Call : A(a)
+/// Expansion: a +
+/// If more arguments than expected are provided, they will be discarded.
+///
+/// The expander does not support:
+/// - recursive expansion
+/// - stringification
+/// - concatenation
+/// - variadic macros
+///
+/// Furthermore, only a single expansion of each macro argument is supported,
+/// so that we cannot get conflicting formatting decisions from different
+/// expansions.
+/// Definition: A(x)=x+x
+/// Call : A(id)
+/// Expansion : id+x
+///
+class MacroExpander {
+public:
+ using ArgsList = llvm::ArrayRef<llvm::SmallVector<FormatToken *, 8>>;
+
+ /// Construct a macro expander from a set of macro definitions.
+ /// Macro definitions must be encoded as UTF-8.
+ ///
+ /// Each entry in \p Macros must conform to the following simple
+ /// macro-definition language:
+ /// <definition> ::= <id> <expansion> | <id> "(" <params> ")" <expansion>
+ /// <params> ::= <id-list> | ""
+ /// <id-list> ::= <id> | <id> "," <params>
+ /// <expansion> ::= "=" <tail> | <eof>
+ /// <tail> ::= <tok> <tail> | <eof>
+ ///
+ /// Macros that cannot be parsed will be silently discarded.
+ ///
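+ /// Illustrative definitions that conform to this grammar:
+ ///   "SEMI=;"          (object-like)
+ ///   "ID(x)=x"         (function-like, one parameter)
+ ///   "PAIR(x, y)=x, y" (function-like, two parameters)
+ ///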
+ MacroExpander(const std::vector<std::string> &Macros,
+ clang::SourceManager &SourceMgr, const FormatStyle &Style,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable);
+ ~MacroExpander();
+
+ /// Returns whether a macro \p Name is defined.
+ bool defined(llvm::StringRef Name) const;
+
+ /// Returns whether the macro has no arguments and should not consume
+ /// subsequent parentheses.
+ bool objectLike(llvm::StringRef Name) const;
+
+ /// Returns the expanded stream of format tokens for \p ID, where
+ /// each element in \p Args is a positional argument to the macro call.
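+ /// A sketch of a call, assuming a definition "A(x, y)=x + y" and two
+ /// pre-lexed argument token sequences Arg0 and Arg1 (hypothetical names):
+ ///   SmallVector<FormatToken *, 8> Expanded =
+ ///       Expander.expand(IDTok, {Arg0, Arg1});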
+ llvm::SmallVector<FormatToken *, 8> expand(FormatToken *ID,
+ ArgsList Args) const;
+
+private:
+ struct Definition;
+ class DefinitionParser;
+
+ void parseDefinition(const std::string &Macro);
+
+ clang::SourceManager &SourceMgr;
+ const FormatStyle &Style;
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
+ IdentifierTable &IdentTable;
+ std::vector<std::unique_ptr<llvm::MemoryBuffer>> Buffers;
+ llvm::StringMap<Definition> Definitions;
+};
+
+} // namespace format
+} // namespace clang
+
+#endif
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 7f8e35126512..34c291ecc492 100644..100755
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -27,7 +27,7 @@ namespace format {
namespace {
/// Returns \c true if the token can be used as an identifier in
-/// an Objective-C \c @selector, \c false otherwise.
+/// an Objective-C \c \@selector, \c false otherwise.
///
/// Because getFormattingLangOpts() always lexes source code as
/// Objective-C++, C++ keywords like \c new and \c delete are
@@ -56,6 +56,13 @@ static bool isLambdaParameterList(const FormatToken *Left) {
Left->Previous->MatchingParen->is(TT_LambdaLSquare);
}
+/// Returns \c true if the token is followed by a boolean condition, \c false
+/// otherwise.
+static bool isKeywordWithCondition(const FormatToken &Tok) {
+ return Tok.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while, tok::kw_switch,
+ tok::kw_constexpr, tok::kw_catch);
+}
+
/// A parser that gathers additional information about tokens.
///
/// The \c TokenAnnotator tries to match parentheses and square brackets and
@@ -108,6 +115,12 @@ private:
while (CurrentToken) {
if (CurrentToken->is(tok::greater)) {
+ // Try to do a better job at looking for ">>" within the condition of
+ // a statement, e.g. the ">>" in "if (a < b >> 1)" is a shift rather
+ // than two template closers.
+ if (CurrentToken->Next && CurrentToken->Next->is(tok::greater) &&
+ Left->ParentBracket != tok::less &&
+ isKeywordWithCondition(*Line.First))
+ return false;
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// In TT_Proto, we must distinguish between:
@@ -185,6 +198,8 @@ private:
if (!CurrentToken)
return false;
FormatToken *Left = CurrentToken->Previous;
+ assert(Left && "Unknown previous token");
+ FormatToken *PrevNonComment = Left->getPreviousNonComment();
Left->ParentBracket = Contexts.back().ContextKind;
ScopedContextCreator ContextCreator(*this, tok::l_paren, 1);
@@ -216,9 +231,8 @@ private:
// export type X = (...);
Contexts.back().IsExpression = false;
} else if (Left->Previous &&
- (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_decltype,
- tok::kw_while, tok::l_paren,
- tok::comma) ||
+ (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_while,
+ tok::l_paren, tok::comma) ||
Left->Previous->isIf() ||
Left->Previous->is(TT_BinaryOperator))) {
// static_assert, if and while usually contain expressions.
@@ -242,8 +256,6 @@ private:
} else if (Contexts[Contexts.size() - 2].CaretFound) {
// This is the parameter list of an ObjC block.
Contexts.back().IsExpression = false;
- } else if (Left->Previous && Left->Previous->is(tok::kw___attribute)) {
- Left->setType(TT_AttributeParen);
} else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) {
// The first argument to a foreach macro is a declaration.
Contexts.back().IsForEachMacro = true;
@@ -257,6 +269,21 @@ private:
Contexts.back().IsExpression = !IsForOrCatch;
}
+ // Infer the role of the l_paren based on the previous token if we haven't
+ // detected one yet.
+ if (PrevNonComment && Left->is(TT_Unknown)) {
+ if (PrevNonComment->is(tok::kw___attribute)) {
+ Left->setType(TT_AttributeParen);
+ } else if (PrevNonComment->isOneOf(TT_TypenameMacro, tok::kw_decltype,
+ tok::kw_typeof, tok::kw__Atomic,
+ tok::kw___underlying_type)) {
+ Left->setType(TT_TypeDeclarationParen);
+ // decltype() and typeof() usually contain expressions.
+ if (PrevNonComment->isOneOf(tok::kw_decltype, tok::kw_typeof))
+ Contexts.back().IsExpression = true;
+ }
+ }
+
if (StartsObjCMethodExpr) {
Contexts.back().ColonIsObjCMethodExpr = true;
Left->setType(TT_ObjCMethodExpr);
@@ -335,6 +362,8 @@ private:
if (Left->is(TT_AttributeParen))
CurrentToken->setType(TT_AttributeParen);
+ if (Left->is(TT_TypeDeclarationParen))
+ CurrentToken->setType(TT_TypeDeclarationParen);
if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
CurrentToken->setType(TT_JavaAnnotation);
if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
@@ -343,11 +372,11 @@ private:
CurrentToken->setType(TT_AttributeSquare);
if (!HasMultipleLines)
- Left->PackingKind = PPK_Inconclusive;
+ Left->setPackingKind(PPK_Inconclusive);
else if (HasMultipleParametersOnALine)
- Left->PackingKind = PPK_BinPacked;
+ Left->setPackingKind(PPK_BinPacked);
else
- Left->PackingKind = PPK_OnePerLine;
+ Left->setPackingKind(PPK_OnePerLine);
next();
return true;
@@ -704,7 +733,7 @@ private:
ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
Contexts.back().ColonIsDictLiteral = true;
- if (Left->BlockKind == BK_BracedInit)
+ if (Left->is(BK_BracedInit))
Contexts.back().IsExpression = true;
if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous &&
Left->Previous->is(TT_JsTypeColon))
@@ -751,7 +780,7 @@ private:
// For ObjC methods, the number of parameters is calculated differently as
// method declarations have a different structure (the parameters are not
// inside a bracket scope).
- if (Current->is(tok::l_brace) && Current->BlockKind == BK_Block)
+ if (Current->is(tok::l_brace) && Current->is(BK_Block))
++Left->BlockParameterCount;
if (Current->is(tok::comma)) {
++Left->ParameterCount;
@@ -867,7 +896,8 @@ private:
} else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) {
Tok->setType(TT_BitFieldColon);
} else if (Contexts.size() == 1 &&
- !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) {
+ !Line.First->isOneOf(tok::kw_enum, tok::kw_case,
+ tok::kw_default)) {
FormatToken *Prev = Tok->getPreviousNonComment();
if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept))
Tok->setType(TT_CtorInitializerColon);
@@ -940,9 +970,9 @@ private:
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
!Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
- (!Tok->Previous ||
- !Tok->Previous->isOneOf(tok::kw_decltype, tok::kw___attribute,
- TT_LeadingJavaAnnotation)))
+ !Tok->is(TT_TypeDeclarationParen) &&
+ (!Tok->Previous || !Tok->Previous->isOneOf(tok::kw___attribute,
+ TT_LeadingJavaAnnotation)))
Line.MightBeFunctionDecl = true;
break;
case tok::l_square:
@@ -1333,11 +1363,13 @@ private:
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
if (!CurrentToken->isOneOf(
- TT_LambdaLSquare, TT_LambdaLBrace, TT_ForEachMacro,
- TT_TypenameMacro, TT_FunctionLBrace, TT_ImplicitStringLiteral,
- TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow, TT_NamespaceMacro,
- TT_OverloadedOperator, TT_RegexLiteral, TT_TemplateString,
- TT_ObjCStringLiteral, TT_UntouchableMacroFunc))
+ TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro,
+ TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace,
+ TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_JsFatArrow,
+ TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator,
+ TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral,
+ TT_UntouchableMacroFunc, TT_ConstraintJunctions,
+ TT_StatementAttributeLikeMacro))
CurrentToken->setType(TT_Unknown);
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
@@ -1591,7 +1623,11 @@ private:
!Current.Previous->is(tok::kw_operator)) {
// not auto operator->() -> xxx;
Current.setType(TT_TrailingReturnArrow);
-
+ } else if (Current.is(tok::arrow) && Current.Previous &&
+ Current.Previous->is(tok::r_brace)) {
+ // Concept implicit conversion constraint needs to be treated like
+ // a trailing return type ... } -> <type>.
+ Current.setType(TT_TrailingReturnArrow);
} else if (isDeductionGuide(Current)) {
// Deduction guides trailing arrow " A(...) -> A<T>;".
Current.setType(TT_TrailingReturnArrow);
@@ -1692,8 +1728,8 @@ private:
// colon after this, this is the only place which annotates the identifier
// as a selector.)
Current.setType(TT_SelectorName);
- } else if (Current.isOneOf(tok::identifier, tok::kw_const,
- tok::kw_noexcept) &&
+ } else if (Current.isOneOf(tok::identifier, tok::kw_const, tok::kw_noexcept,
+ tok::kw_requires) &&
Current.Previous &&
!Current.Previous->isOneOf(tok::equal, tok::at) &&
Line.MightBeFunctionDecl && Contexts.size() == 1) {
@@ -1753,9 +1789,8 @@ private:
PreviousNotConst->MatchingParen->Previous->isNot(tok::period) &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::kw_template);
- if (PreviousNotConst->is(tok::r_paren) && PreviousNotConst->MatchingParen &&
- PreviousNotConst->MatchingParen->Previous &&
- PreviousNotConst->MatchingParen->Previous->is(tok::kw_decltype))
+ if (PreviousNotConst->is(tok::r_paren) &&
+ PreviousNotConst->is(TT_TypeDeclarationParen))
return true;
return (!IsPPKeyword &&
@@ -1810,8 +1845,8 @@ private:
// Functions which end with decorations like volatile, noexcept are unlikely
// to be casts.
if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
- tok::kw_throw, tok::arrow, Keywords.kw_override,
- Keywords.kw_final) ||
+ tok::kw_requires, tok::kw_throw, tok::arrow,
+ Keywords.kw_override, Keywords.kw_final) ||
isCpp11AttributeSpecifier(*Tok.Next))
return false;
@@ -1827,10 +1862,38 @@ private:
return true;
// Heuristically try to determine whether the parentheses contain a type.
+ auto IsQualifiedPointerOrReference = [](FormatToken *T) {
+ // This is used to handle cases such as x = (foo *const)&y;
+ assert(!T->isSimpleTypeSpecifier() && "Should have already been checked");
+ // Strip trailing qualifiers such as const or volatile when checking
+ // whether the parens could be a cast to a pointer/reference type.
+ while (T) {
+ if (T->is(TT_AttributeParen)) {
+ // Handle `x = (foo *__attribute__((foo)))&v;`:
+ if (T->MatchingParen && T->MatchingParen->Previous &&
+ T->MatchingParen->Previous->is(tok::kw___attribute)) {
+ T = T->MatchingParen->Previous->Previous;
+ continue;
+ }
+ } else if (T->is(TT_AttributeSquare)) {
+ // Handle `x = (foo *[[clang::foo]])&v;`:
+ if (T->MatchingParen && T->MatchingParen->Previous) {
+ T = T->MatchingParen->Previous;
+ continue;
+ }
+ } else if (T->canBePointerOrReferenceQualifier()) {
+ T = T->Previous;
+ continue;
+ }
+ break;
+ }
+ return T && T->is(TT_PointerOrReference);
+ };
bool ParensAreType =
!Tok.Previous ||
- Tok.Previous->isOneOf(TT_PointerOrReference, TT_TemplateCloser) ||
- Tok.Previous->isSimpleTypeSpecifier();
+ Tok.Previous->isOneOf(TT_TemplateCloser, TT_TypeDeclarationParen) ||
+ Tok.Previous->isSimpleTypeSpecifier() ||
+ IsQualifiedPointerOrReference(Tok.Previous);
bool ParensCouldEndDecl =
Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace, tok::greater);
if (ParensAreType && !ParensCouldEndDecl)
@@ -1854,6 +1917,13 @@ private:
if (Tok.Next->isOneOf(tok::identifier, tok::kw_this))
return true;
+ if (Tok.Next->is(tok::l_paren) &&
+ !(Tok.Previous && Tok.Previous->is(tok::identifier) &&
+ Tok.Previous->Previous &&
+ Tok.Previous->Previous->isOneOf(tok::arrowstar, tok::arrow,
+ tok::star)))
+ return true;
+
if (!Tok.Next->Next)
return false;
@@ -1890,18 +1960,22 @@ private:
const FormatToken *NextToken = Tok.getNextNonComment();
if (!NextToken ||
- NextToken->isOneOf(tok::arrow, tok::equal, tok::kw_const,
- tok::kw_noexcept) ||
+ NextToken->isOneOf(tok::arrow, tok::equal, tok::kw_noexcept) ||
+ NextToken->canBePointerOrReferenceQualifier() ||
(NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
return TT_PointerOrReference;
if (PrevToken->is(tok::coloncolon))
return TT_PointerOrReference;
+ if (PrevToken->is(tok::r_paren) && PrevToken->is(TT_TypeDeclarationParen))
+ return TT_PointerOrReference;
+
if (PrevToken->isOneOf(tok::l_paren, tok::l_square, tok::l_brace,
tok::comma, tok::semi, tok::kw_return, tok::colon,
- tok::equal, tok::kw_delete, tok::kw_sizeof,
- tok::kw_throw) ||
+ tok::kw_co_return, tok::kw_co_await,
+ tok::kw_co_yield, tok::equal, tok::kw_delete,
+ tok::kw_sizeof, tok::kw_throw) ||
PrevToken->isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
TT_UnaryOperator, TT_CastRParen))
return TT_UnaryOperator;
@@ -1913,15 +1987,6 @@ private:
if (NextToken->isOneOf(tok::comma, tok::semi))
return TT_PointerOrReference;
- if (PrevToken->is(tok::r_paren) && PrevToken->MatchingParen) {
- FormatToken *TokenBeforeMatchingParen =
- PrevToken->MatchingParen->getPreviousNonComment();
- if (TokenBeforeMatchingParen &&
- TokenBeforeMatchingParen->isOneOf(tok::kw_typeof, tok::kw_decltype,
- TT_TypenameMacro))
- return TT_PointerOrReference;
- }
-
if (PrevToken->Tok.isLiteral() ||
PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
tok::kw_false, tok::r_brace) ||
@@ -2367,6 +2432,8 @@ static bool isFunctionDeclarationName(const FormatToken &Current,
return true;
for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
Tok = Tok->Next) {
+ if (Tok->is(TT_TypeDeclarationParen))
+ return true;
if (Tok->isOneOf(tok::l_paren, TT_TemplateOpener) && Tok->MatchingParen) {
Tok = Tok->MatchingParen;
continue;
@@ -2420,7 +2487,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
if (isFunctionDeclarationName(*Current, Line))
Current->setType(TT_FunctionDeclarationName);
if (Current->is(TT_LineComment)) {
- if (Current->Previous->BlockKind == BK_BracedInit &&
+ if (Current->Previous->is(BK_BracedInit) &&
Current->Previous->opensScope())
Current->SpacesRequiredBefore =
(Style.Cpp11BracedListStyle && !Style.SpacesInParentheses) ? 0 : 1;
@@ -2681,7 +2748,11 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.is(TT_TemplateOpener))
return 100;
if (Left.opensScope()) {
- if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign)
+ // If we aren't aligning after opening parens/braces we can always break
+ // here unless the style does not want us to place all arguments on the
+ // next line.
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign &&
+ (Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine))
return 0;
if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
return 19;
@@ -2733,13 +2804,6 @@ bool TokenAnnotator::spaceRequiredBeforeParens(const FormatToken &Right) const {
Right.ParameterCount > 0);
}
-/// Returns \c true if the token is followed by a boolean condition, \c false
-/// otherwise.
-static bool isKeywordWithCondition(const FormatToken &Tok) {
- return Tok.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while, tok::kw_switch,
- tok::kw_constexpr, tok::kw_catch);
-}
-
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const FormatToken &Left,
const FormatToken &Right) {
@@ -2755,8 +2819,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.isOneOf(tok::hashhash, tok::hash))
return Right.is(tok::hash);
if ((Left.is(tok::l_paren) && Right.is(tok::r_paren)) ||
- (Left.is(tok::l_brace) && Left.BlockKind != BK_Block &&
- Right.is(tok::r_brace) && Right.BlockKind != BK_Block))
+ (Left.is(tok::l_brace) && Left.isNot(BK_Block) &&
+ Right.is(tok::r_brace) && Right.isNot(BK_Block)))
return Style.SpaceInEmptyParentheses;
if (Style.SpacesInConditionalStatement) {
if (Left.is(tok::l_paren) && Left.Previous &&
@@ -2767,6 +2831,14 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
isKeywordWithCondition(*Right.MatchingParen->Previous))
return true;
}
+
+ // requires ( or requires(
+ if (Right.is(tok::l_paren) && Left.is(tok::kw_requires))
+ return spaceRequiredBeforeParens(Right);
+ // requires clause Concept1<T> && Concept2<T>
+ if (Left.is(TT_ConstraintJunctions) && Right.is(tok::identifier))
+ return true;
+
if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
return (Right.is(TT_CastRParen) ||
(Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
@@ -2815,11 +2887,16 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return true;
FormatToken *TokenBeforeMatchingParen =
Left.MatchingParen->getPreviousNonComment();
- if (!TokenBeforeMatchingParen ||
- !TokenBeforeMatchingParen->isOneOf(tok::kw_typeof, tok::kw_decltype,
- TT_TypenameMacro))
+ if (!TokenBeforeMatchingParen || !Left.is(TT_TypeDeclarationParen))
return true;
}
+ // Add a space if the previous token is a pointer qualifier or the closing
+ // parenthesis of an __attribute__(()) expression and the style requires
+ // spaces after pointer qualifiers.
+ if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_After ||
+ Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
+ (Left.is(TT_AttributeParen) || Left.canBePointerOrReferenceQualifier()))
+ return true;
return (Left.Tok.isLiteral() ||
(!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
(Style.PointerAlignment != FormatStyle::PAS_Left ||
@@ -2832,11 +2909,17 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
(Style.PointerAlignment != FormatStyle::PAS_Right &&
!Line.IsMultiVariableDeclStmt)))
return true;
- if (Left.is(TT_PointerOrReference))
+ if (Left.is(TT_PointerOrReference)) {
+ // Add a space if the next token is a pointer qualifier and the style
+ // requires spaces before pointer qualifiers.
+ if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Before ||
+ Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
+ Right.canBePointerOrReferenceQualifier())
+ return true;
return Right.Tok.isLiteral() || Right.is(TT_BlockComment) ||
(Right.isOneOf(Keywords.kw_override, Keywords.kw_final) &&
!Right.is(TT_StartOfName)) ||
- (Right.is(tok::l_brace) && Right.BlockKind == BK_Block) ||
+ (Right.is(tok::l_brace) && Right.is(BK_Block)) ||
(!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
tok::l_paren) &&
(Style.PointerAlignment != FormatStyle::PAS_Right &&
@@ -2844,6 +2927,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Left.Previous &&
!Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
tok::l_square));
+ }
// Ensure right pointer alignment with ellipsis, e.g. int *...P
if (Left.is(tok::ellipsis) && Left.Previous &&
Left.Previous->isOneOf(tok::star, tok::amp, tok::ampamp))
@@ -2921,9 +3005,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
return !Left.Children.empty(); // No spaces in "{}".
- if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
+ if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
- Right.MatchingParen->BlockKind != BK_Block))
+ Right.MatchingParen->isNot(BK_Block)))
return Style.Cpp11BracedListStyle ? Style.SpacesInParentheses : true;
if (Left.is(TT_BlockComment))
// No whitespace in x(/*foo=*/1), except for JavaScript.
@@ -2967,7 +3051,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
tok::r_paren) ||
Left.isSimpleTypeSpecifier()) &&
Right.is(tok::l_brace) && Right.getNextNonComment() &&
- Right.BlockKind != BK_Block)
+ Right.isNot(BK_Block))
return false;
if (Left.is(tok::period) || Right.is(tok::period))
return false;
@@ -3009,7 +3093,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Style.isCpp()) {
if (Left.is(tok::kw_operator))
return Right.is(tok::coloncolon);
- if (Right.is(tok::l_brace) && Right.BlockKind == BK_BracedInit &&
+ if (Right.is(tok::l_brace) && Right.is(BK_BracedInit) &&
!Left.opensScope() && Style.SpaceBeforeCpp11BracedList)
return true;
} else if (Style.Language == FormatStyle::LK_Proto ||
@@ -3115,6 +3199,16 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Keywords.kw_lock))
return Style.SpaceBeforeParens == FormatStyle::SBPO_ControlStatements ||
spaceRequiredBeforeParens(Right);
+
+ // space between method modifier and opening parenthesis of a tuple return
+ // type
+ if (Left.isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected,
+ tok::kw_virtual, tok::kw_extern, tok::kw_static,
+ Keywords.kw_internal, Keywords.kw_abstract,
+ Keywords.kw_sealed, Keywords.kw_override,
+ Keywords.kw_async, Keywords.kw_unsafe) &&
+ Right.is(tok::l_paren))
+ return true;
} else if (Style.Language == FormatStyle::LK_JavaScript) {
if (Left.is(TT_JsFatArrow))
return true;
@@ -3251,9 +3345,13 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Right.is(TT_RangeBasedForLoopColon) &&
!Style.SpaceBeforeRangeBasedForLoopColon)
return false;
+ if (Left.is(TT_BitFieldColon))
+ return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
+ Style.BitFieldColonSpacing == FormatStyle::BFCS_After;
if (Right.is(tok::colon)) {
- if (Line.First->isOneOf(tok::kw_case, tok::kw_default) ||
- !Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
+ if (Line.First->isOneOf(tok::kw_default, tok::kw_case))
+ return Style.SpaceBeforeCaseColon;
+ if (!Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
return false;
if (Right.is(TT_ObjCMethodExpr))
return false;
@@ -3267,6 +3365,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return false;
if (Right.is(TT_CSharpNamedArgumentColon))
return false;
+ if (Right.is(TT_BitFieldColon))
+ return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
+ Style.BitFieldColonSpacing == FormatStyle::BFCS_Before;
return true;
}
if (Left.is(TT_UnaryOperator)) {
@@ -3356,7 +3457,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Returns 'true' if 'Tok' is a brace we'd want to break before in Allman style.
static bool isAllmanBrace(const FormatToken &Tok) {
- return Tok.is(tok::l_brace) && Tok.BlockKind == BK_Block &&
+ return Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
!Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
}
@@ -3392,7 +3493,7 @@ static bool isOneChildWithoutMustBreakBefore(const FormatToken &Tok) {
return true;
}
static bool isAllmanLambdaBrace(const FormatToken &Tok) {
- return (Tok.is(tok::l_brace) && Tok.BlockKind == BK_Block &&
+ return (Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
!Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral));
}
@@ -3492,7 +3593,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if ((Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
(Style.Language == FormatStyle::LK_JavaScript &&
Left.is(tok::l_paren))) &&
- Left.BlockKind != BK_Block && Left.MatchingParen)
+ Left.isNot(BK_Block) && Left.MatchingParen)
BeforeClosingBrace = Left.MatchingParen->Previous;
else if (Right.MatchingParen &&
(Right.MatchingParen->isOneOf(tok::l_brace,
@@ -3506,8 +3607,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(tok::comment))
- return Left.BlockKind != BK_BracedInit &&
- Left.isNot(TT_CtorInitializerColon) &&
+ return Left.isNot(BK_BracedInit) && Left.isNot(TT_CtorInitializerColon) &&
(Right.NewlinesBefore > 0 && Right.HasUnescapedNewline);
if (Left.isTrailingComment())
return true;
@@ -3517,11 +3617,17 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
Right.Previous->is(tok::string_literal) &&
Right.Next->is(tok::string_literal))
return true;
+ // Can break after template<> declaration
if (Right.Previous->ClosesTemplateDeclaration &&
Right.Previous->MatchingParen &&
- Right.Previous->MatchingParen->NestingLevel == 0 &&
- Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes)
- return true;
+ Right.Previous->MatchingParen->NestingLevel == 0) {
+ // Put concepts on the next line e.g.
+ // template<typename T>
+ // concept ...
+ if (Right.is(tok::kw_concept))
+ return Style.BreakBeforeConceptDeclarations;
+ return (Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes);
+ }
if (Right.is(TT_CtorInitializerComma) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma &&
!Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
@@ -3816,7 +3922,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// The first comment in a braced lists is always interpreted as belonging to
// the first list element. Otherwise, it should be placed outside of the
// list.
- return Left.BlockKind == BK_BracedInit ||
+ return Left.is(BK_BracedInit) ||
(Left.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon);
if (Left.is(tok::question) && Right.is(tok::colon))
@@ -3900,7 +4006,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
!Style.Cpp11BracedListStyle)
return false;
- if (Left.is(tok::l_paren) && Left.is(TT_AttributeParen))
+ if (Left.is(tok::l_paren) &&
+ Left.isOneOf(TT_AttributeParen, TT_TypeDeclarationParen))
return false;
if (Left.is(tok::l_paren) && Left.Previous &&
(Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen)))
@@ -3917,7 +4024,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// We only break before r_brace if there was a corresponding break before
// the l_brace, which is tracked by BreakBeforeClosingBrace.
if (Right.is(tok::r_brace))
- return Right.MatchingParen && Right.MatchingParen->BlockKind == BK_Block;
+ return Right.MatchingParen && Right.MatchingParen->is(BK_Block);
// Allow breaking after a trailing annotation, e.g. after a method
// declaration.
@@ -4002,9 +4109,9 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
<< " T=" << getTokenTypeName(Tok->getType())
<< " S=" << Tok->SpacesRequiredBefore
<< " F=" << Tok->Finalized << " B=" << Tok->BlockParameterCount
- << " BK=" << Tok->BlockKind << " P=" << Tok->SplitPenalty
+ << " BK=" << Tok->getBlockKind() << " P=" << Tok->SplitPenalty
<< " Name=" << Tok->Tok.getName() << " L=" << Tok->TotalLength
- << " PPK=" << Tok->PackingKind << " FakeLParens=";
+ << " PPK=" << Tok->getPackingKind() << " FakeLParens=";
for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
llvm::errs() << Tok->FakeLParens[i] << "/";
llvm::errs() << " FakeRParens=" << Tok->FakeRParens;
diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp
index 22f27a668dcc..d1138bbc9c36 100644
--- a/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -248,6 +248,11 @@ private:
return !Style.BraceWrapping.SplitEmptyRecord && EmptyBlock
? tryMergeSimpleBlock(I, E, Limit)
: 0;
+
+ if (Tok && Tok->is(tok::kw_template) &&
+ Style.BraceWrapping.SplitEmptyRecord && EmptyBlock) {
+ return 0;
+ }
}
// FIXME: TheLine->Level != 0 might or might not be the right check to do.
@@ -309,7 +314,8 @@ private:
// Try to merge a control statement block with left brace wrapped
if (I[1]->First->is(tok::l_brace) &&
(TheLine->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for,
- tok::kw_switch, tok::kw_try, tok::kw_do) ||
+ tok::kw_switch, tok::kw_try, tok::kw_do,
+ TT_ForEachMacro) ||
(TheLine->First->is(tok::r_brace) && TheLine->First->Next &&
TheLine->First->Next->isOneOf(tok::kw_else, tok::kw_catch))) &&
Style.BraceWrapping.AfterControlStatement ==
@@ -354,6 +360,30 @@ private:
if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
I[-1]->First->isOneOf(tok::kw_case, tok::kw_default))
return 0;
+
+ // Don't merge an empty template class or struct if SplitEmptyRecords
+ // is defined.
+ if (Style.BraceWrapping.SplitEmptyRecord &&
+ TheLine->Last->is(tok::l_brace) && I != AnnotatedLines.begin() &&
+ I[-1]->Last) {
+ const FormatToken *Previous = I[-1]->Last;
+ if (Previous) {
+ if (Previous->is(tok::comment))
+ Previous = Previous->getPreviousNonComment();
+ if (Previous) {
+ if (Previous->is(tok::greater))
+ return 0;
+ if (Previous->is(tok::identifier)) {
+ const FormatToken *PreviousPrevious =
+ Previous->getPreviousNonComment();
+ if (PreviousPrevious &&
+ PreviousPrevious->isOneOf(tok::kw_class, tok::kw_struct))
+ return 0;
+ }
+ }
+ }
+ }
+
// Try to merge a block with left brace wrapped that wasn't yet covered
if (TheLine->Last->is(tok::l_brace)) {
return !Style.BraceWrapping.AfterFunction ||
@@ -606,7 +636,7 @@ private:
if (I[1]->Last->is(TT_LineComment))
return 0;
do {
- if (Tok->is(tok::l_brace) && Tok->BlockKind != BK_BracedInit)
+ if (Tok->is(tok::l_brace) && Tok->isNot(BK_BracedInit))
return 0;
Tok = Tok->Next;
} while (Tok);
@@ -767,8 +797,8 @@ protected:
unsigned &Penalty) {
const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
FormatToken &Previous = *State.NextToken->Previous;
- if (!LBrace || LBrace->isNot(tok::l_brace) ||
- LBrace->BlockKind != BK_Block || Previous.Children.size() == 0)
+ if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->isNot(BK_Block) ||
+ Previous.Children.size() == 0)
// The previous token does not open a block. Nothing to do. We don't
// assert so that we can simply call this function for all tokens.
return true;
@@ -979,7 +1009,7 @@ private:
// State already examined with lower penalty.
continue;
- FormatDecision LastFormat = Node->State.NextToken->Decision;
+ FormatDecision LastFormat = Node->State.NextToken->getDecision();
if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue);
if (LastFormat == FD_Unformatted || LastFormat == FD_Break)
@@ -1215,10 +1245,33 @@ void UnwrappedLineFormatter::formatFirstToken(
!startsExternCBlock(*PreviousLine))
Newlines = 1;
- // Insert extra new line before access specifiers.
- if (PreviousLine && PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
- RootToken.isAccessSpecifier() && RootToken.NewlinesBefore == 1)
- ++Newlines;
+ // Insert or remove empty line before access specifiers.
+ if (PreviousLine && RootToken.isAccessSpecifier()) {
+ switch (Style.EmptyLineBeforeAccessModifier) {
+ case FormatStyle::ELBAMS_Never:
+ if (RootToken.NewlinesBefore > 1)
+ Newlines = 1;
+ break;
+ case FormatStyle::ELBAMS_Leave:
+ Newlines = std::max(RootToken.NewlinesBefore, 1u);
+ break;
+ case FormatStyle::ELBAMS_LogicalBlock:
+ if (PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
+ RootToken.NewlinesBefore <= 1)
+ Newlines = 2;
+ break;
+ case FormatStyle::ELBAMS_Always: {
+ const FormatToken *previousToken;
+ if (PreviousLine->Last->is(tok::comment))
+ previousToken = PreviousLine->Last->getPreviousNonComment();
+ else
+ previousToken = PreviousLine->Last;
+ if ((!previousToken || !previousToken->is(tok::l_brace)) &&
+ RootToken.NewlinesBefore <= 1)
+ Newlines = 2;
+ } break;
+ }
+ }
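+ // e.g. with ELBAMS_LogicalBlock, "int a;" directly followed by "private:"
+ // gains one empty line in between; with ELBAMS_Never, several empty lines
+ // before "private:" collapse to a single newline.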
// Remove empty lines after access specifiers.
if (PreviousLine && PreviousLine->First->isAccessSpecifier() &&
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index a37386425aae..f689a6361a3a 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -472,19 +472,19 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// individual members in a type member list, which would normally
// trigger BK_Block. In both cases, this must be parsed as an inline
// braced init.
- Tok->BlockKind = BK_BracedInit;
+ Tok->setBlockKind(BK_BracedInit);
else if (PrevTok->is(tok::r_paren))
// `) { }` can only occur in function or method declarations in JS.
- Tok->BlockKind = BK_Block;
+ Tok->setBlockKind(BK_Block);
} else {
- Tok->BlockKind = BK_Unknown;
+ Tok->setBlockKind(BK_Unknown);
}
LBraceStack.push_back(Tok);
break;
case tok::r_brace:
if (LBraceStack.empty())
break;
- if (LBraceStack.back()->BlockKind == BK_Unknown) {
+ if (LBraceStack.back()->is(BK_Unknown)) {
bool ProbablyBracedList = false;
if (Style.Language == FormatStyle::LK_Proto) {
ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
@@ -524,11 +524,11 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
}
}
if (ProbablyBracedList) {
- Tok->BlockKind = BK_BracedInit;
- LBraceStack.back()->BlockKind = BK_BracedInit;
+ Tok->setBlockKind(BK_BracedInit);
+ LBraceStack.back()->setBlockKind(BK_BracedInit);
} else {
- Tok->BlockKind = BK_Block;
- LBraceStack.back()->BlockKind = BK_Block;
+ Tok->setBlockKind(BK_Block);
+ LBraceStack.back()->setBlockKind(BK_Block);
}
}
LBraceStack.pop_back();
@@ -545,8 +545,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
case tok::kw_switch:
case tok::kw_try:
case tok::kw___try:
- if (!LBraceStack.empty() && LBraceStack.back()->BlockKind == BK_Unknown)
- LBraceStack.back()->BlockKind = BK_Block;
+ if (!LBraceStack.empty() && LBraceStack.back()->is(BK_Unknown))
+ LBraceStack.back()->setBlockKind(BK_Block);
break;
default:
break;
@@ -557,8 +557,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// Assume other blocks for all unclosed opening braces.
for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
- if (LBraceStack[i]->BlockKind == BK_Unknown)
- LBraceStack[i]->BlockKind = BK_Block;
+ if (LBraceStack[i]->is(BK_Unknown))
+ LBraceStack[i]->setBlockKind(BK_Block);
}
FormatTok = Tokens->setPosition(StoredPosition);
@@ -584,7 +584,7 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
"'{' or macro block token expected");
const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
- FormatTok->BlockKind = BK_Block;
+ FormatTok->setBlockKind(BK_Block);
size_t PPStartHash = computePPHash();
@@ -614,7 +614,7 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
if (MacroBlock ? !FormatTok->is(TT_MacroBlockEnd)
: !FormatTok->is(tok::r_brace)) {
Line->Level = InitialLevel;
- FormatTok->BlockKind = BK_Block;
+ FormatTok->setBlockKind(BK_Block);
return;
}
@@ -626,8 +626,16 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
+ if (FormatTok->is(tok::arrow)) {
+ // Following the "}", we can find a trailing return type arrow as part
+ // of an implicit conversion constraint (a compound requirement).
+ nextToken();
+ parseStructuralElement();
+ }
+
if (MunchSemi && FormatTok->Tok.is(tok::semi))
nextToken();
+
Line->Level = InitialLevel;
if (PPStartHash == PPEndHash) {
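The "}" followed by "->" handled here occurs in compound requirements, where a return type constraint trails the braced expression, e.g. (illustrative):

    #include <concepts>
    #include <string>

    template <typename T>
    concept Stringable = requires(T t) {
      { t.str() } -> std::convertible_to<std::string>;
      //          ^~ the arrow after "}" that parseBlock now consumes
    };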
@@ -690,7 +698,7 @@ static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
}
void UnwrappedLineParser::parseChildBlock() {
- FormatTok->BlockKind = BK_Block;
+ FormatTok->setBlockKind(BK_Block);
nextToken();
{
bool SkipIndent = (Style.Language == FormatStyle::LK_JavaScript &&
@@ -1262,6 +1270,12 @@ void UnwrappedLineParser::parseStructuralElement() {
break;
}
break;
+ case tok::kw_concept:
+ parseConcept();
+ break;
+ case tok::kw_requires:
+ parseRequires();
+ break;
case tok::kw_enum:
// Ignore if this is part of "template <enum ...".
if (Previous && Previous->is(tok::less)) {
@@ -1476,7 +1490,7 @@ void UnwrappedLineParser::parseStructuralElement() {
// C# needs this change to ensure that array initialisers and object
// initialisers are indented the same way.
if (Style.isCSharp())
- FormatTok->BlockKind = BK_BracedInit;
+ FormatTok->setBlockKind(BK_BracedInit);
nextToken();
parseBracedList();
} else if (Style.Language == FormatStyle::LK_Proto &&
@@ -1747,10 +1761,10 @@ void UnwrappedLineParser::tryToParseJSFunction() {
}
bool UnwrappedLineParser::tryToParseBracedList() {
- if (FormatTok->BlockKind == BK_Unknown)
+ if (FormatTok->is(BK_Unknown))
calculateBraceTypes();
- assert(FormatTok->BlockKind != BK_Unknown);
- if (FormatTok->BlockKind == BK_Block)
+ assert(FormatTok->isNot(BK_Unknown));
+ if (FormatTok->is(BK_Block))
return false;
nextToken();
parseBracedList();
@@ -1830,7 +1844,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
case tok::l_brace:
// Assume there are no blocks inside a braced init list apart
// from the ones we explicitly parse out (like lambdas).
- FormatTok->BlockKind = BK_BracedInit;
+ FormatTok->setBlockKind(BK_BracedInit);
nextToken();
parseBracedList();
break;
@@ -2025,6 +2039,13 @@ void UnwrappedLineParser::parseTryCatch() {
nextToken();
if (FormatTok->is(tok::l_paren))
parseParens();
+ if (FormatTok->Previous && FormatTok->Previous->is(tok::identifier) &&
+ FormatTok->is(tok::l_brace)) {
+ do {
+ nextToken();
+ } while (!FormatTok->is(tok::r_brace));
+ nextToken();
+ }
// In case identifiers were removed by clang-tidy, what might follow is
// multiple commas in sequence - after the first identifier.
@@ -2212,18 +2233,26 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
--Line->Level;
if (LeftAlignLabel)
Line->Level = 0;
+
+ bool RemoveWhitesmithsCaseIndent =
+ (!Style.IndentCaseBlocks &&
+ Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths);
+
+ if (RemoveWhitesmithsCaseIndent)
+ --Line->Level;
+
if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
FormatTok->Tok.is(tok::l_brace)) {
- CompoundStatementIndenter Indenter(this, Line->Level,
- Style.BraceWrapping.AfterCaseLabel,
- Style.BraceWrapping.IndentBraces);
+
+ CompoundStatementIndenter Indenter(
+ this, Line->Level, Style.BraceWrapping.AfterCaseLabel,
+ Style.BraceWrapping.IndentBraces || RemoveWhitesmithsCaseIndent);
parseBlock(/*MustBeDeclaration=*/false);
if (FormatTok->Tok.is(tok::kw_break)) {
if (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always) {
addUnwrappedLine();
- if (!Style.IndentCaseBlocks &&
- Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
+ if (RemoveWhitesmithsCaseIndent) {
Line->Level++;
}
}
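With BreakBeforeBraces: BS_Whitesmiths and IndentCaseBlocks: false, the extra indent level formerly added to a braced case body is now dropped, giving roughly (illustrative; the exact whitespace depends on the remaining style options):

    switch (a)
      {
      case 0:
        {
        handleZero();
        break;
        }
      }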
@@ -2244,6 +2273,7 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
void UnwrappedLineParser::parseCaseLabel() {
assert(FormatTok->Tok.is(tok::kw_case) && "'case' expected");
+
// FIXME: fix handling of complex expressions here.
do {
nextToken();
@@ -2279,6 +2309,117 @@ void UnwrappedLineParser::parseAccessSpecifier() {
addUnwrappedLine();
}
+void UnwrappedLineParser::parseConcept() {
+ assert(FormatTok->Tok.is(tok::kw_concept) && "'concept' expected");
+ nextToken();
+ if (!FormatTok->Tok.is(tok::identifier))
+ return;
+ nextToken();
+ if (!FormatTok->Tok.is(tok::equal))
+ return;
+ nextToken();
+ if (FormatTok->Tok.is(tok::kw_requires)) {
+ nextToken();
+ parseRequiresExpression(Line->Level);
+ } else {
+ parseConstraintExpression(Line->Level);
+ }
+}
+
+void UnwrappedLineParser::parseRequiresExpression(unsigned int OriginalLevel) {
+ // requires (R range)
+ if (FormatTok->Tok.is(tok::l_paren)) {
+ parseParens();
+ if (Style.IndentRequires && OriginalLevel != Line->Level) {
+ addUnwrappedLine();
+ --Line->Level;
+ }
+ }
+
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterFunction)
+ addUnwrappedLine();
+ FormatTok->setType(TT_FunctionLBrace);
+ parseBlock(/*MustBeDeclaration=*/false);
+ addUnwrappedLine();
+ } else {
+ parseConstraintExpression(OriginalLevel);
+ }
+}
+
+void UnwrappedLineParser::parseConstraintExpression(
+ unsigned int OriginalLevel) {
+ // requires Id<T> && Id<T> || Id<T>
+ while (
+ FormatTok->isOneOf(tok::identifier, tok::kw_requires, tok::coloncolon)) {
+ nextToken();
+ while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::less,
+ tok::greater, tok::comma, tok::ellipsis)) {
+ if (FormatTok->Tok.is(tok::less)) {
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
+ /*ClosingBraceKind=*/tok::greater);
+ continue;
+ }
+ nextToken();
+ }
+ if (FormatTok->Tok.is(tok::kw_requires)) {
+ parseRequiresExpression(OriginalLevel);
+ }
+ if (FormatTok->Tok.is(tok::less)) {
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
+ /*ClosingBraceKind=*/tok::greater);
+ }
+
+ if (FormatTok->Tok.is(tok::l_paren)) {
+ parseParens();
+ }
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterFunction)
+ addUnwrappedLine();
+ FormatTok->setType(TT_FunctionLBrace);
+ parseBlock(/*MustBeDeclaration=*/false);
+ }
+ if (FormatTok->Tok.is(tok::semi)) {
+ // Eat any trailing semi.
+ nextToken();
+ addUnwrappedLine();
+ }
+ if (FormatTok->Tok.is(tok::colon)) {
+ return;
+ }
+ if (!FormatTok->Tok.isOneOf(tok::ampamp, tok::pipepipe)) {
+ if (FormatTok->Previous &&
+ !FormatTok->Previous->isOneOf(tok::identifier, tok::kw_requires,
+ tok::coloncolon)) {
+ addUnwrappedLine();
+ }
+ if (Style.IndentRequires && OriginalLevel != Line->Level) {
+ --Line->Level;
+ }
+ break;
+ } else {
+ FormatTok->setType(TT_ConstraintJunctions);
+ }
+
+ nextToken();
+ }
+}
+
+void UnwrappedLineParser::parseRequires() {
+ assert(FormatTok->Tok.is(tok::kw_requires) && "'requires' expected");
+
+ unsigned OriginalLevel = Line->Level;
+ if (FormatTok->Previous && FormatTok->Previous->is(tok::greater)) {
+ addUnwrappedLine();
+ if (Style.IndentRequires) {
+ Line->Level++;
+ }
+ }
+ nextToken();
+
+ parseRequiresExpression(OriginalLevel);
+}
+
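Taken together, the new members cover C++20 declarations such as the following (illustrative): parseConcept() takes over after the concept keyword, parseRequires() handles a requires clause after a template parameter list, and both delegate to the requires-expression and constraint-expression helpers above.

    #include <concepts>

    template <typename T>
    concept Range = requires(T r) { // parseConcept -> parseRequiresExpression
      r.begin();
      r.end();
    };

    template <typename T>
    requires Range<T> && std::copyable<T> // parseRequires -> constraint expr.
    void sortRange(T &r);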
bool UnwrappedLineParser::parseEnum() {
// Won't be 'enum' for NS_ENUMs.
if (FormatTok->Tok.is(tok::kw_enum))
@@ -2318,7 +2459,7 @@ bool UnwrappedLineParser::parseEnum() {
// Just a declaration or something is wrong.
if (FormatTok->isNot(tok::l_brace))
return true;
- FormatTok->BlockKind = BK_Block;
+ FormatTok->setBlockKind(BK_Block);
if (Style.Language == FormatStyle::LK_Java) {
// Java enums are different.
@@ -2612,32 +2753,15 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
// @interface can be followed by a lightweight generic
// specialization list, then either a base class or a category.
if (FormatTok->Tok.is(tok::less)) {
- // Unlike protocol lists, generic parameterizations support
- // nested angles:
- //
- // @interface Foo<ValueType : id <NSCopying, NSSecureCoding>> :
- // NSObject <NSCopying, NSSecureCoding>
- //
- // so we need to count how many open angles we have left.
- unsigned NumOpenAngles = 1;
- do {
- nextToken();
- // Early exit in case someone forgot a close angle.
- if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
- FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
- break;
- if (FormatTok->Tok.is(tok::less))
- ++NumOpenAngles;
- else if (FormatTok->Tok.is(tok::greater)) {
- assert(NumOpenAngles > 0 && "'>' makes NumOpenAngles negative");
- --NumOpenAngles;
- }
- } while (!eof() && NumOpenAngles != 0);
- nextToken(); // Skip '>'.
+ parseObjCLightweightGenerics();
}
if (FormatTok->Tok.is(tok::colon)) {
nextToken();
nextToken(); // base class name
+ // The base class can also have lightweight generics applied to it.
+ if (FormatTok->Tok.is(tok::less)) {
+ parseObjCLightweightGenerics();
+ }
} else if (FormatTok->Tok.is(tok::l_paren))
// Skip category, if present.
parseParens();
@@ -2658,6 +2782,32 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
parseObjCUntilAtEnd();
}
+void UnwrappedLineParser::parseObjCLightweightGenerics() {
+ assert(FormatTok->Tok.is(tok::less));
+ // Unlike protocol lists, generic parameterizations support
+ // nested angles:
+ //
+ // @interface Foo<ValueType : id <NSCopying, NSSecureCoding>> :
+ // NSObject <NSCopying, NSSecureCoding>
+ //
+ // so we need to count how many open angles we have left.
+ unsigned NumOpenAngles = 1;
+ do {
+ nextToken();
+ // Early exit in case someone forgot a close angle.
+ if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
+ FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
+ break;
+ if (FormatTok->Tok.is(tok::less))
+ ++NumOpenAngles;
+ else if (FormatTok->Tok.is(tok::greater)) {
+ assert(NumOpenAngles > 0 && "'>' makes NumOpenAngles negative");
+ --NumOpenAngles;
+ }
+ } while (!eof() && NumOpenAngles != 0);
+ nextToken(); // Skip '>'.
+}
+
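An example of the newly handled form, where the base class itself carries a lightweight generic parameterization followed by a protocol list (illustrative Objective-C):

    @interface StringList : NSArray<NSString *> <NSCopying>
    @end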
// Returns true for the declaration/definition form of @protocol,
// false for the expression form.
bool UnwrappedLineParser::parseObjCProtocol() {
@@ -2726,7 +2876,7 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
return;
}
if (FormatTok->is(tok::l_brace)) {
- FormatTok->BlockKind = BK_Block;
+ FormatTok->setBlockKind(BK_Block);
nextToken();
parseBracedList();
} else {
@@ -2753,7 +2903,7 @@ LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
E = Line.Tokens.end();
I != E; ++I) {
llvm::dbgs() << I->Tok->Tok.getName() << "["
- << "T=" << I->Tok->getType()
+ << "T=" << (unsigned)I->Tok->getType()
<< ", OC=" << I->Tok->OriginalColumn << "] ";
}
for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
@@ -3037,7 +3187,6 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
}
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
- FormatTok->MustBreakAlignBefore = true;
}
if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) &&
@@ -3062,7 +3211,6 @@ void UnwrappedLineParser::pushToken(FormatToken *Tok) {
Line->Tokens.push_back(UnwrappedLineNode(Tok));
if (MustBreakBeforeNextToken) {
Line->Tokens.back().Tok->MustBreakBefore = true;
- Line->Tokens.back().Tok->MustBreakAlignBefore = true;
MustBreakBeforeNextToken = false;
}
}
diff --git a/clang/lib/Format/UnwrappedLineParser.h b/clang/lib/Format/UnwrappedLineParser.h
index 8b3aa4c84edb..02b328cb72de 100644
--- a/clang/lib/Format/UnwrappedLineParser.h
+++ b/clang/lib/Format/UnwrappedLineParser.h
@@ -113,11 +113,16 @@ private:
void parseNew();
void parseAccessSpecifier();
bool parseEnum();
+ void parseConcept();
+ void parseRequires();
+ void parseRequiresExpression(unsigned int OriginalLevel);
+ void parseConstraintExpression(unsigned int OriginalLevel);
void parseJavaEnumBody();
// Parses a record (aka class) as a top level element. If ParseAsExpr is true,
// parses the record as a child block, i.e. if the class declaration is an
// expression.
void parseRecord(bool ParseAsExpr = false);
+ void parseObjCLightweightGenerics();
void parseObjCMethod();
void parseObjCProtocolList();
void parseObjCUntilAtEnd();
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 32e0b685ea0f..7d6964b7c72f 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -49,7 +49,7 @@ void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
bool IsAligned, bool InPPDirective) {
if (Tok.Finalized)
return;
- Tok.Decision = (Newlines > 0) ? FD_Break : FD_Continue;
+ Tok.setDecision((Newlines > 0) ? FD_Break : FD_Continue);
Changes.push_back(Change(Tok, /*CreateReplacement=*/true, Tok.WhitespaceRange,
Spaces, StartOfTokenColumn, Newlines, "", "",
IsAligned, InPPDirective && !Tok.IsFirst,
@@ -361,9 +361,10 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
// that are split across multiple lines. See the test case in FormatTest.cpp
// that mentions "split function parameter alignment" for an example of this.
template <typename F>
-static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
- SmallVector<WhitespaceManager::Change, 16> &Changes,
- unsigned StartAt) {
+static unsigned AlignTokens(
+ const FormatStyle &Style, F &&Matches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes, unsigned StartAt,
+ const FormatStyle::AlignConsecutiveStyle &ACS = FormatStyle::ACS_None) {
unsigned MinColumn = 0;
unsigned MaxColumn = UINT_MAX;
@@ -386,6 +387,9 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// Whether a matching token has been found on the current line.
bool FoundMatchOnLine = false;
+ // Whether the current line consists purely of comments.
+ bool LineIsComment = true;
+
// Aligns a sequence of matching tokens, on the MinColumn column.
//
// Sequences start from the first matching token to align, and end at the
@@ -411,21 +415,38 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (Changes[i].NewlinesBefore != 0) {
CommasBeforeMatch = 0;
EndOfSequence = i;
- // If there is a blank line, there is a forced-align-break (eg,
- // preprocessor), or if the last line didn't contain any matching token,
- // the sequence ends here.
- if (Changes[i].NewlinesBefore > 1 ||
- Changes[i].Tok->MustBreakAlignBefore || !FoundMatchOnLine)
+
+ // Whether to break the alignment sequence because of an empty line.
+ bool EmptyLineBreak =
+ (Changes[i].NewlinesBefore > 1) &&
+ (ACS != FormatStyle::ACS_AcrossEmptyLines) &&
+ (ACS != FormatStyle::ACS_AcrossEmptyLinesAndComments);
+
+ // Whether to break the alignment sequence because of a line without a
+ // match.
+ bool NoMatchBreak =
+ !FoundMatchOnLine &&
+ !(LineIsComment &&
+ ((ACS == FormatStyle::ACS_AcrossComments) ||
+ (ACS == FormatStyle::ACS_AcrossEmptyLinesAndComments)));
+
+ if (EmptyLineBreak || NoMatchBreak)
AlignCurrentSequence();
+ // A new line starts; re-initialize the line status tracking bools.
FoundMatchOnLine = false;
+ LineIsComment = true;
+ }
+
+ if (!Changes[i].Tok->is(tok::comment)) {
+ LineIsComment = false;
}
if (Changes[i].Tok->is(tok::comma)) {
++CommasBeforeMatch;
} else if (Changes[i].indentAndNestingLevel() > IndentAndNestingLevel) {
// Call AlignTokens recursively, skipping over this scope block.
- unsigned StoppedAt = AlignTokens(Style, Matches, Changes, i);
+ unsigned StoppedAt = AlignTokens(Style, Matches, Changes, i, ACS);
i = StoppedAt - 1;
continue;
}
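The net effect of threading ACS through AlignTokens is that an alignment run can now survive blank lines and/or comment-only lines. For example, with AlignConsecutiveAssignments: AcrossEmptyLinesAndComments (illustrative):

    int a            = 1;
    int somelongname = 2;

    /* a comment-only line */
    double c         = 3.0; // still aligned with the assignments above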
@@ -520,7 +541,7 @@ static void AlignMacroSequence(
}
void WhitespaceManager::alignConsecutiveMacros() {
- if (!Style.AlignConsecutiveMacros)
+ if (Style.AlignConsecutiveMacros == FormatStyle::ACS_None)
return;
auto AlignMacrosMatches = [](const Change &C) {
@@ -562,17 +583,41 @@ void WhitespaceManager::alignConsecutiveMacros() {
// Whether a matching token has been found on the current line.
bool FoundMatchOnLine = false;
+ // Whether the current line consists only of comments.
+ bool LineIsComment = true;
+
unsigned I = 0;
for (unsigned E = Changes.size(); I != E; ++I) {
if (Changes[I].NewlinesBefore != 0) {
EndOfSequence = I;
- // If there is a blank line, or if the last line didn't contain any
- // matching token, the sequence ends here.
- if (Changes[I].NewlinesBefore > 1 || !FoundMatchOnLine)
+
+ // Whether to break the alignment sequence because of an empty line.
+ bool EmptyLineBreak =
+ (Changes[I].NewlinesBefore > 1) &&
+ (Style.AlignConsecutiveMacros != FormatStyle::ACS_AcrossEmptyLines) &&
+ (Style.AlignConsecutiveMacros !=
+ FormatStyle::ACS_AcrossEmptyLinesAndComments);
+
+ // Whether to break the alignment sequence because of a line without a
+ // match.
+ bool NoMatchBreak =
+ !FoundMatchOnLine &&
+ !(LineIsComment && ((Style.AlignConsecutiveMacros ==
+ FormatStyle::ACS_AcrossComments) ||
+ (Style.AlignConsecutiveMacros ==
+ FormatStyle::ACS_AcrossEmptyLinesAndComments)));
+
+ if (EmptyLineBreak || NoMatchBreak)
AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
FoundMatchOnLine, AlignMacrosMatches, Changes);
+ // A new line starts; re-initialize the line status tracking bools.
FoundMatchOnLine = false;
+ LineIsComment = true;
+ }
+
+ if (!Changes[I].Tok->is(tok::comment)) {
+ LineIsComment = false;
}
if (!AlignMacrosMatches(Changes[I]))
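The same modes apply to macro alignment; with AlignConsecutiveMacros: AcrossEmptyLines, for instance (illustrative):

    #define SHORT_NAME       42
    #define LONGER_NAME      0x007f

    #define EVEN_LONGER_NAME (2) // the blank line no longer ends the run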
@@ -599,7 +644,7 @@ void WhitespaceManager::alignConsecutiveMacros() {
}
void WhitespaceManager::alignConsecutiveAssignments() {
- if (!Style.AlignConsecutiveAssignments)
+ if (Style.AlignConsecutiveAssignments == FormatStyle::ACS_None)
return;
AlignTokens(
@@ -615,11 +660,11 @@ void WhitespaceManager::alignConsecutiveAssignments() {
return C.Tok->is(tok::equal);
},
- Changes, /*StartAt=*/0);
+ Changes, /*StartAt=*/0, Style.AlignConsecutiveAssignments);
}
void WhitespaceManager::alignConsecutiveBitFields() {
- if (!Style.AlignConsecutiveBitFields)
+ if (Style.AlignConsecutiveBitFields == FormatStyle::ACS_None)
return;
AlignTokens(
@@ -635,11 +680,11 @@ void WhitespaceManager::alignConsecutiveBitFields() {
return C.Tok->is(TT_BitFieldColon);
},
- Changes, /*StartAt=*/0);
+ Changes, /*StartAt=*/0, Style.AlignConsecutiveBitFields);
}
void WhitespaceManager::alignConsecutiveDeclarations() {
- if (!Style.AlignConsecutiveDeclarations)
+ if (Style.AlignConsecutiveDeclarations == FormatStyle::ACS_None)
return;
// FIXME: Currently we don't handle properly the PointerAlignment: Right
@@ -657,6 +702,9 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
return true;
if (C.Tok->isNot(TT_StartOfName))
return false;
+ if (C.Tok->Previous &&
+ C.Tok->Previous->is(TT_StatementAttributeLikeMacro))
+ return false;
// Check if there is a subsequent name that starts the same declaration.
for (FormatToken *Next = C.Tok->Next; Next; Next = Next->Next) {
if (Next->is(tok::comment))
@@ -669,7 +717,7 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
}
return true;
},
- Changes, /*StartAt=*/0);
+ Changes, /*StartAt=*/0, Style.AlignConsecutiveDeclarations);
}
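The TT_StatementAttributeLikeMacro check keeps statement prefixes such as Qt's emit from being mistaken for the start of a declaration; previously the identifier after the macro could be column-aligned as if it were a declared name. Illustrative, assuming emit is configured as a statement-attribute-like macro:

    unsigned char MyChar = 'x';
    emit          signal(MyChar); // old mis-alignment: "signal" was treated
                                  // as a declarator and aligned with "MyChar"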
void WhitespaceManager::alignChainedConditionals() {
@@ -726,8 +774,6 @@ void WhitespaceManager::alignTrailingComments() {
if (Changes[i].StartOfBlockComment)
continue;
Newlines += Changes[i].NewlinesBefore;
- if (Changes[i].Tok->MustBreakAlignBefore)
- BreakBeforeNext = true;
if (!Changes[i].IsTrailingComment)
continue;
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index 57d025b7c32e..e7a87dc6b23c 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -69,6 +69,7 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -757,9 +758,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts, bool UseDebugInfo,
- bool OnlyLocalDecls, ArrayRef<RemappedFile> RemappedFiles,
- CaptureDiagsKind CaptureDiagnostics, bool AllowPCHWithCompilerErrors,
- bool UserFilesAreVolatile) {
+ bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
+ bool AllowASTWithCompilerErrors, bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
@@ -792,9 +792,6 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
/*Target=*/nullptr));
AST->PPOpts = std::make_shared<PreprocessorOptions>();
- for (const auto &RemappedFile : RemappedFiles)
- AST->PPOpts->addRemappedFile(RemappedFile.first, RemappedFile.second);
-
// Gather Info for preprocessor construction later on.
HeaderSearch &HeaderInfo = *AST->HeaderInfo;
@@ -812,13 +809,14 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
PP.getIdentifierTable(), PP.getSelectorTable(),
PP.getBuiltinInfo());
- bool disableValid = false;
+ DisableValidationForModuleKind disableValid =
+ DisableValidationForModuleKind::None;
if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
- disableValid = true;
+ disableValid = DisableValidationForModuleKind::All;
AST->Reader = new ASTReader(
PP, *AST->ModuleCache, AST->Ctx.get(), PCHContainerRdr, {},
/*isysroot=*/"",
- /*DisableValidation=*/disableValid, AllowPCHWithCompilerErrors);
+ /*DisableValidation=*/disableValid, AllowASTWithCompilerErrors);
AST->Reader->setListener(std::make_unique<ASTInfoCollector>(
*AST->PP, AST->Ctx.get(), *AST->HSOpts, *AST->PPOpts, *AST->LangOpts,
@@ -1118,6 +1116,19 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(std::move(PCHContainerOps)));
+ // Clean up on error; disengage it if the function returns successfully.
+ auto CleanOnError = llvm::make_scope_exit([&]() {
+ // Remove the overridden buffer we used for the preamble.
+ SavedMainFileBuffer = nullptr;
+
+ // Keep the ownership of the data in the ASTUnit because the client may
+ // want to see the diagnostics.
+ transferASTDataFromCompilerInstance(*Clang);
+ FailedParseDiagnostics.swap(StoredDiagnostics);
+ StoredDiagnostics.clear();
+ NumStoredDiagnosticsFromDriver = 0;
+ });
+
// Ensure that Clang has a FileManager with the right VFS, which may have
// changed above in AddImplicitPreamble. If VFS is nullptr, rely on
// createFileManager to create one.
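For reference, the idiom introduced here: llvm::make_scope_exit (from llvm/ADT/ScopeExit.h, newly included above) runs its callback when the guard leaves scope unless release() disarms it first. A minimal sketch:

    #include "llvm/ADT/ScopeExit.h"

    bool parseSomething() {
      auto CleanOnError = llvm::make_scope_exit([] {
        // roll back any partial state here
      });
      if (/* parsing failed */ false)
        return true;          // the guard fires on this early exit
      CleanOnError.release(); // success path: disarm the cleanup
      return false;
    }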
@@ -1172,9 +1183,6 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
TopLevelDeclsInPreamble.clear();
}
- // Create a file manager object to provide access to and cache the filesystem.
- Clang->setFileManager(&getFileManager());
-
// Create the source manager.
Clang->setSourceManager(&getSourceManager());
@@ -1200,7 +1208,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
ActCleanup(Act.get());
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
- goto error;
+ return true;
if (SavedMainFileBuffer)
TranslateStoredDiagnostics(getFileManager(), getSourceManager(),
@@ -1210,7 +1218,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
if (llvm::Error Err = Act->Execute()) {
consumeError(std::move(Err)); // FIXME this drops errors on the floor.
- goto error;
+ return true;
}
transferASTDataFromCompilerInstance(*Clang);
@@ -1219,19 +1227,9 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
FailedParseDiagnostics.clear();
- return false;
-
-error:
- // Remove the overridden buffer we used for the preamble.
- SavedMainFileBuffer = nullptr;
+ CleanOnError.release();
- // Keep the ownership of the data in the ASTUnit because the client may
- // want to see the diagnostics.
- transferASTDataFromCompilerInstance(*Clang);
- FailedParseDiagnostics.swap(StoredDiagnostics);
- StoredDiagnostics.clear();
- NumStoredDiagnosticsFromDriver = 0;
- return true;
+ return false;
}
static std::pair<unsigned, unsigned>
@@ -1313,15 +1311,14 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
if (!MainFileBuffer)
return nullptr;
- PreambleBounds Bounds =
- ComputePreambleBounds(*PreambleInvocationIn.getLangOpts(),
- MainFileBuffer.get(), MaxLines);
+ PreambleBounds Bounds = ComputePreambleBounds(
+ *PreambleInvocationIn.getLangOpts(), *MainFileBuffer, MaxLines);
if (!Bounds.Size)
return nullptr;
if (Preamble) {
- if (Preamble->CanReuse(PreambleInvocationIn, MainFileBuffer.get(), Bounds,
- VFS.get())) {
+ if (Preamble->CanReuse(PreambleInvocationIn, *MainFileBuffer, Bounds,
+ *VFS)) {
// Okay! We can re-use the precompiled preamble.
// Set the state of the diagnostic object to mimic its state
@@ -1468,7 +1465,7 @@ StringRef ASTUnit::getMainFileName() const {
if (Input.isFile())
return Input.getFile();
else
- return Input.getBuffer()->getBufferIdentifier();
+ return Input.getBuffer().getBufferIdentifier();
}
if (SourceMgr) {
@@ -1517,8 +1514,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
ASTUnit *Unit, bool Persistent, StringRef ResourceFilesPath,
bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
unsigned PrecompilePreambleAfterNParses, bool CacheCodeCompletionResults,
- bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile,
- std::unique_ptr<ASTUnit> *ErrAST) {
+ bool UserFilesAreVolatile, std::unique_ptr<ASTUnit> *ErrAST) {
assert(CI && "A CompilerInvocation is required");
std::unique_ptr<ASTUnit> OwnAST;
@@ -1541,8 +1537,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
AST->PreambleRebuildCountdown = PrecompilePreambleAfterNParses;
AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
- AST->IncludeBriefCommentsInCodeCompletion
- = IncludeBriefCommentsInCodeCompletion;
+ AST->IncludeBriefCommentsInCodeCompletion = false;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -2239,28 +2234,30 @@ void ASTUnit::CodeComplete(
= new AugmentedCodeCompleteConsumer(*this, Consumer, CodeCompleteOpts);
Clang->setCodeCompletionConsumer(AugmentedConsumer);
+ auto getUniqueID =
+ [&FileMgr](StringRef Filename) -> Optional<llvm::sys::fs::UniqueID> {
+ if (auto Status = FileMgr.getVirtualFileSystem().status(Filename))
+ return Status->getUniqueID();
+ return None;
+ };
+
+ auto hasSameUniqueID = [getUniqueID](StringRef LHS, StringRef RHS) {
+ if (LHS == RHS)
+ return true;
+ if (auto LHSID = getUniqueID(LHS))
+ if (auto RHSID = getUniqueID(RHS))
+ return *LHSID == *RHSID;
+ return false;
+ };
+
// If we have a precompiled preamble, try to use it. We only allow
// the use of the precompiled preamble if the completion
// point is within the main file, after the end of the precompiled
// preamble.
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
- if (Preamble) {
- std::string CompleteFilePath(File);
-
- auto &VFS = FileMgr.getVirtualFileSystem();
- auto CompleteFileStatus = VFS.status(CompleteFilePath);
- if (CompleteFileStatus) {
- llvm::sys::fs::UniqueID CompleteFileID = CompleteFileStatus->getUniqueID();
-
- std::string MainPath(OriginalSourceFile);
- auto MainStatus = VFS.status(MainPath);
- if (MainStatus) {
- llvm::sys::fs::UniqueID MainID = MainStatus->getUniqueID();
- if (CompleteFileID == MainID && Line > 1)
- OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
- PCHContainerOps, Inv, &VFS, false, Line - 1);
- }
- }
+ if (Preamble && Line > 1 && hasSameUniqueID(File, OriginalSourceFile)) {
+ OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
+ PCHContainerOps, Inv, &FileMgr.getVirtualFileSystem(), false, Line - 1);
}
// If the main file has been overridden due to the use of a preamble,
diff --git a/clang/lib/Frontend/ChainedIncludesSource.cpp b/clang/lib/Frontend/ChainedIncludesSource.cpp
index 1486adf70c3f..380eba4562b4 100644
--- a/clang/lib/Frontend/ChainedIncludesSource.cpp
+++ b/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -83,10 +83,10 @@ createASTReader(CompilerInstance &CI, StringRef pchFile,
ASTDeserializationListener *deserialListener = nullptr) {
Preprocessor &PP = CI.getPreprocessor();
std::unique_ptr<ASTReader> Reader;
- Reader.reset(new ASTReader(PP, CI.getModuleCache(), &CI.getASTContext(),
- CI.getPCHContainerReader(),
- /*Extensions=*/{},
- /*isysroot=*/"", /*DisableValidation=*/true));
+ Reader.reset(new ASTReader(
+ PP, CI.getModuleCache(), &CI.getASTContext(), CI.getPCHContainerReader(),
+ /*Extensions=*/{},
+ /*isysroot=*/"", DisableValidationForModuleKind::PCH));
for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
StringRef sr(bufNames[ti]);
Reader->addInMemoryBuffer(sr, std::move(MemBufs[ti]));
@@ -129,7 +129,8 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
CInvok->getPreprocessorOpts().ChainedIncludes.clear();
CInvok->getPreprocessorOpts().ImplicitPCHInclude.clear();
- CInvok->getPreprocessorOpts().DisablePCHValidation = true;
+ CInvok->getPreprocessorOpts().DisablePCHOrModuleValidation =
+ DisableValidationForModuleKind::PCH;
CInvok->getPreprocessorOpts().Includes.clear();
CInvok->getPreprocessorOpts().MacroIncludes.clear();
CInvok->getPreprocessorOpts().Macros.clear();
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index 4613ed8d7f61..956877d34680 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -85,7 +85,7 @@ void CompilerInstance::setDiagnostics(DiagnosticsEngine *Value) {
}
void CompilerInstance::setVerboseOutputStream(raw_ostream &Value) {
- OwnedVerboseOutputStream.release();
+ OwnedVerboseOutputStream.reset();
VerboseOutputStream = &Value;
}
@@ -346,10 +346,16 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
continue;
}
- // Override the contents of the "from" file with the contents of
- // the "to" file.
- SourceMgr.overrideFileContents(FromFile, RB.second,
- InitOpts.RetainRemappedFileBuffers);
+ // Override the contents of the "from" file with the contents of the
+ // "to" file. If the caller owns the buffers, then pass a MemoryBufferRef;
+ // otherwise, pass as a std::unique_ptr<MemoryBuffer> to transfer ownership
+ // to the SourceManager.
+ if (InitOpts.RetainRemappedFileBuffers)
+ SourceMgr.overrideFileContents(FromFile, RB.second->getMemBufferRef());
+ else
+ SourceMgr.overrideFileContents(
+ FromFile, std::unique_ptr<llvm::MemoryBuffer>(
+ const_cast<llvm::MemoryBuffer *>(RB.second)));
}
// Remap files in the source manager (with other files).
@@ -422,8 +428,12 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
PP->setPreprocessedOutput(getPreprocessorOutputOpts().ShowCPP);
- if (PP->getLangOpts().Modules && PP->getLangOpts().ImplicitModules)
- PP->getHeaderSearchInfo().setModuleCachePath(getSpecificModuleCachePath());
+ if (PP->getLangOpts().Modules && PP->getLangOpts().ImplicitModules) {
+ std::string ModuleHash = getInvocation().getModuleHash();
+ PP->getHeaderSearchInfo().setModuleHash(ModuleHash);
+ PP->getHeaderSearchInfo().setModuleCachePath(
+ getSpecificModuleCachePath(ModuleHash));
+ }
// Handle generating dependencies, if requested.
const DependencyOutputOptions &DepOpts = getDependencyOutputOpts();
@@ -471,13 +481,11 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
}
}
-std::string CompilerInstance::getSpecificModuleCachePath() {
- // Set up the module path, including the hash for the
- // module-creation options.
+std::string CompilerInstance::getSpecificModuleCachePath(StringRef ModuleHash) {
+ // Set up the module path, including the hash for the module-creation options.
SmallString<256> SpecificModuleCache(getHeaderSearchOpts().ModuleCachePath);
if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
- llvm::sys::path::append(SpecificModuleCache,
- getInvocation().getModuleHash());
+ llvm::sys::path::append(SpecificModuleCache, ModuleHash);
return std::string(SpecificModuleCache.str());
}
@@ -495,11 +503,12 @@ void CompilerInstance::createASTContext() {
// ExternalASTSource
void CompilerInstance::createPCHExternalASTSource(
- StringRef Path, bool DisablePCHValidation, bool AllowPCHWithCompilerErrors,
- void *DeserializationListener, bool OwnDeserializationListener) {
+ StringRef Path, DisableValidationForModuleKind DisableValidation,
+ bool AllowPCHWithCompilerErrors, void *DeserializationListener,
+ bool OwnDeserializationListener) {
bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
TheASTReader = createPCHExternalASTSource(
- Path, getHeaderSearchOpts().Sysroot, DisablePCHValidation,
+ Path, getHeaderSearchOpts().Sysroot, DisableValidation,
AllowPCHWithCompilerErrors, getPreprocessor(), getModuleCache(),
getASTContext(), getPCHContainerReader(),
getFrontendOpts().ModuleFileExtensions, DependencyCollectors,
@@ -508,7 +517,8 @@ void CompilerInstance::createPCHExternalASTSource(
}
IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
- StringRef Path, StringRef Sysroot, bool DisablePCHValidation,
+ StringRef Path, StringRef Sysroot,
+ DisableValidationForModuleKind DisableValidation,
bool AllowPCHWithCompilerErrors, Preprocessor &PP,
InMemoryModuleCache &ModuleCache, ASTContext &Context,
const PCHContainerReader &PCHContainerRdr,
@@ -520,7 +530,7 @@ IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
IntrusiveRefCntPtr<ASTReader> Reader(new ASTReader(
PP, ModuleCache, &Context, PCHContainerRdr, Extensions,
- Sysroot.empty() ? "" : Sysroot.data(), DisablePCHValidation,
+ Sysroot.empty() ? "" : Sysroot.data(), DisableValidation,
AllowPCHWithCompilerErrors, /*AllowConfigurationMismatch*/ false,
HSOpts.ModulesValidateSystemHeaders, HSOpts.ValidateASTInputFilesContent,
UseGlobalModuleIndex));
@@ -636,31 +646,32 @@ void CompilerInstance::createSema(TranslationUnitKind TUKind,
// Output Files
-void CompilerInstance::addOutputFile(OutputFile &&OutFile) {
- OutputFiles.push_back(std::move(OutFile));
-}
-
void CompilerInstance::clearOutputFiles(bool EraseFiles) {
for (OutputFile &OF : OutputFiles) {
- if (!OF.TempFilename.empty()) {
- if (EraseFiles) {
+ if (EraseFiles) {
+ if (!OF.TempFilename.empty()) {
llvm::sys::fs::remove(OF.TempFilename);
- } else {
- SmallString<128> NewOutFile(OF.Filename);
-
- // If '-working-directory' was passed, the output filename should be
- // relative to that.
- FileMgr->FixupRelativePath(NewOutFile);
- if (std::error_code ec =
- llvm::sys::fs::rename(OF.TempFilename, NewOutFile)) {
- getDiagnostics().Report(diag::err_unable_to_rename_temp)
- << OF.TempFilename << OF.Filename << ec.message();
-
- llvm::sys::fs::remove(OF.TempFilename);
- }
+ continue;
}
- } else if (!OF.Filename.empty() && EraseFiles)
- llvm::sys::fs::remove(OF.Filename);
+ if (!OF.Filename.empty())
+ llvm::sys::fs::remove(OF.Filename);
+ continue;
+ }
+
+ if (OF.TempFilename.empty())
+ continue;
+
+ // If '-working-directory' was passed, the output filename should be
+ // relative to that.
+ SmallString<128> NewOutFile(OF.Filename);
+ FileMgr->FixupRelativePath(NewOutFile);
+ std::error_code EC = llvm::sys::fs::rename(OF.TempFilename, NewOutFile);
+ if (!EC)
+ continue;
+ getDiagnostics().Report(diag::err_unable_to_rename_temp)
+ << OF.TempFilename << OF.Filename << EC.message();
+
+ llvm::sys::fs::remove(OF.TempFilename);
}
OutputFiles.clear();
if (DeleteBuiltModules) {
@@ -668,15 +679,29 @@ void CompilerInstance::clearOutputFiles(bool EraseFiles) {
llvm::sys::fs::remove(Module.second);
BuiltModules.clear();
}
- NonSeekStream.reset();
}
std::unique_ptr<raw_pwrite_stream>
CompilerInstance::createDefaultOutputFile(bool Binary, StringRef InFile,
- StringRef Extension) {
- return createOutputFile(getFrontendOpts().OutputFile, Binary,
- /*RemoveFileOnSignal=*/true, InFile, Extension,
- getFrontendOpts().UseTemporary);
+ StringRef Extension,
+ bool RemoveFileOnSignal,
+ bool CreateMissingDirectories) {
+ StringRef OutputPath = getFrontendOpts().OutputFile;
+ Optional<SmallString<128>> PathStorage;
+ if (OutputPath.empty()) {
+ if (InFile == "-" || Extension.empty()) {
+ OutputPath = "-";
+ } else {
+ PathStorage.emplace(InFile);
+ llvm::sys::path::replace_extension(*PathStorage, Extension);
+ OutputPath = *PathStorage;
+ }
+ }
+
+ // Force a temporary file if RemoveFileOnSignal was disabled.
+ return createOutputFile(OutputPath, Binary, RemoveFileOnSignal,
+ getFrontendOpts().UseTemporary || !RemoveFileOnSignal,
+ CreateMissingDirectories);
}
std::unique_ptr<raw_pwrite_stream> CompilerInstance::createNullOutputFile() {
@@ -685,64 +710,40 @@ std::unique_ptr<raw_pwrite_stream> CompilerInstance::createNullOutputFile() {
std::unique_ptr<raw_pwrite_stream>
CompilerInstance::createOutputFile(StringRef OutputPath, bool Binary,
- bool RemoveFileOnSignal, StringRef InFile,
- StringRef Extension, bool UseTemporary,
+ bool RemoveFileOnSignal, bool UseTemporary,
bool CreateMissingDirectories) {
- std::string OutputPathName, TempPathName;
- std::error_code EC;
- std::unique_ptr<raw_pwrite_stream> OS = createOutputFile(
- OutputPath, EC, Binary, RemoveFileOnSignal, InFile, Extension,
- UseTemporary, CreateMissingDirectories, &OutputPathName, &TempPathName);
- if (!OS) {
- getDiagnostics().Report(diag::err_fe_unable_to_open_output) << OutputPath
- << EC.message();
- return nullptr;
- }
-
- // Add the output file -- but don't try to remove "-", since this means we are
- // using stdin.
- addOutputFile(
- OutputFile((OutputPathName != "-") ? OutputPathName : "", TempPathName));
-
- return OS;
+ Expected<std::unique_ptr<raw_pwrite_stream>> OS =
+ createOutputFileImpl(OutputPath, Binary, RemoveFileOnSignal, UseTemporary,
+ CreateMissingDirectories);
+ if (OS)
+ return std::move(*OS);
+ getDiagnostics().Report(diag::err_fe_unable_to_open_output)
+ << OutputPath << errorToErrorCode(OS.takeError()).message();
+ return nullptr;
}
-std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
- StringRef OutputPath, std::error_code &Error, bool Binary,
- bool RemoveFileOnSignal, StringRef InFile, StringRef Extension,
- bool UseTemporary, bool CreateMissingDirectories,
- std::string *ResultPathName, std::string *TempPathName) {
+Expected<std::unique_ptr<llvm::raw_pwrite_stream>>
+CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
+ bool RemoveFileOnSignal,
+ bool UseTemporary,
+ bool CreateMissingDirectories) {
assert((!CreateMissingDirectories || UseTemporary) &&
"CreateMissingDirectories is only allowed when using temporary files");
- std::string OutFile, TempFile;
- if (!OutputPath.empty()) {
- OutFile = std::string(OutputPath);
- } else if (InFile == "-") {
- OutFile = "-";
- } else if (!Extension.empty()) {
- SmallString<128> Path(InFile);
- llvm::sys::path::replace_extension(Path, Extension);
- OutFile = std::string(Path.str());
- } else {
- OutFile = "-";
- }
-
std::unique_ptr<llvm::raw_fd_ostream> OS;
- std::string OSFile;
+ Optional<StringRef> OSFile;
if (UseTemporary) {
- if (OutFile == "-")
+ if (OutputPath == "-")
UseTemporary = false;
else {
llvm::sys::fs::file_status Status;
llvm::sys::fs::status(OutputPath, Status);
if (llvm::sys::fs::exists(Status)) {
// Fail early if we can't write to the final destination.
- if (!llvm::sys::fs::can_write(OutputPath)) {
- Error = make_error_code(llvm::errc::operation_not_permitted);
- return nullptr;
- }
+ if (!llvm::sys::fs::can_write(OutputPath))
+ return llvm::errorCodeToError(
+ make_error_code(llvm::errc::operation_not_permitted));
// Don't use a temporary if the output is a special file. This handles
// things like '-o /dev/null'
@@ -752,14 +753,15 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
}
}
+ std::string TempFile;
if (UseTemporary) {
// Create a temporary file.
// Insert -%%%%%%%% before the extension (if any), and because some tools
// (notably, clang's own GlobalModuleIndex.cpp) glob for build
// artifacts, also append .tmp.
- StringRef OutputExtension = llvm::sys::path::extension(OutFile);
+ StringRef OutputExtension = llvm::sys::path::extension(OutputPath);
SmallString<128> TempPath =
- StringRef(OutFile).drop_back(OutputExtension.size());
+ StringRef(OutputPath).drop_back(OutputExtension.size());
TempPath += "-%%%%%%%%";
TempPath += OutputExtension;
TempPath += ".tmp";
@@ -786,30 +788,28 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
}
if (!OS) {
- OSFile = OutFile;
+ OSFile = OutputPath;
+ std::error_code EC;
OS.reset(new llvm::raw_fd_ostream(
- OSFile, Error,
+ *OSFile, EC,
(Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_Text)));
- if (Error)
- return nullptr;
+ if (EC)
+ return llvm::errorCodeToError(EC);
}
// Make sure the out stream file gets removed if we crash.
if (RemoveFileOnSignal)
- llvm::sys::RemoveFileOnSignal(OSFile);
+ llvm::sys::RemoveFileOnSignal(*OSFile);
- if (ResultPathName)
- *ResultPathName = OutFile;
- if (TempPathName)
- *TempPathName = TempFile;
+ // Add the output file -- but don't try to remove "-", since this means we are
+ // using stdout.
+ OutputFiles.emplace_back(((OutputPath != "-") ? OutputPath : "").str(),
+ std::move(TempFile));
if (!Binary || OS->supportsSeeking())
return std::move(OS);
- auto B = std::make_unique<llvm::buffer_ostream>(*OS);
- assert(!NonSeekStream);
- NonSeekStream = std::move(OS);
- return std::move(B);
+ return std::make_unique<llvm::buffer_unique_ostream>(std::move(OS));
}
// Initialization Utilities
@@ -831,8 +831,7 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
: Input.isSystem() ? SrcMgr::C_System : SrcMgr::C_User;
if (Input.isBuffer()) {
- SourceMgr.setMainFileID(SourceMgr.createFileID(SourceManager::Unowned,
- Input.getBuffer(), Kind));
+ SourceMgr.setMainFileID(SourceMgr.createFileID(Input.getBuffer(), Kind));
assert(SourceMgr.getMainFileID().isValid() &&
"Couldn't establish MainFileID!");
return true;
@@ -841,56 +840,22 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
StringRef InputFile = Input.getFile();
// Figure out where to get and map in the main file.
- if (InputFile != "-") {
- auto FileOrErr = FileMgr.getFileRef(InputFile, /*OpenFile=*/true);
- if (!FileOrErr) {
- // FIXME: include the error in the diagnostic.
- consumeError(FileOrErr.takeError());
+ auto FileOrErr = InputFile == "-"
+ ? FileMgr.getSTDIN()
+ : FileMgr.getFileRef(InputFile, /*OpenFile=*/true);
+ if (!FileOrErr) {
+ // FIXME: include the error in the diagnostic even when it's not stdin.
+ auto EC = llvm::errorToErrorCode(FileOrErr.takeError());
+ if (InputFile != "-")
Diags.Report(diag::err_fe_error_reading) << InputFile;
- return false;
- }
- FileEntryRef File = *FileOrErr;
-
- // The natural SourceManager infrastructure can't currently handle named
- // pipes, but we would at least like to accept them for the main
- // file. Detect them here, read them with the volatile flag so FileMgr will
- // pick up the correct size, and simply override their contents as we do for
- // STDIN.
- if (File.getFileEntry().isNamedPipe()) {
- auto MB =
- FileMgr.getBufferForFile(&File.getFileEntry(), /*isVolatile=*/true);
- if (MB) {
- // Create a new virtual file that will have the correct size.
- const FileEntry *FE =
- FileMgr.getVirtualFile(InputFile, (*MB)->getBufferSize(), 0);
- SourceMgr.overrideFileContents(FE, std::move(*MB));
- SourceMgr.setMainFileID(
- SourceMgr.createFileID(FE, SourceLocation(), Kind));
- } else {
- Diags.Report(diag::err_cannot_open_file) << InputFile
- << MB.getError().message();
- return false;
- }
- } else {
- SourceMgr.setMainFileID(
- SourceMgr.createFileID(File, SourceLocation(), Kind));
- }
- } else {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> SBOrErr =
- llvm::MemoryBuffer::getSTDIN();
- if (std::error_code EC = SBOrErr.getError()) {
+ else
Diags.Report(diag::err_fe_error_reading_stdin) << EC.message();
- return false;
- }
- std::unique_ptr<llvm::MemoryBuffer> SB = std::move(SBOrErr.get());
-
- const FileEntry *File = FileMgr.getVirtualFile(SB->getBufferIdentifier(),
- SB->getBufferSize(), 0);
- SourceMgr.setMainFileID(
- SourceMgr.createFileID(File, SourceLocation(), Kind));
- SourceMgr.overrideFileContents(File, std::move(SB));
+ return false;
}
+ SourceMgr.setMainFileID(
+ SourceMgr.createFileID(*FileOrErr, SourceLocation(), Kind));
+
assert(SourceMgr.getMainFileID().isValid() &&
"Couldn't establish MainFileID!");
return true;
@@ -968,7 +933,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
<< " based upon " << BACKEND_PACKAGE_STRING
<< " default target " << llvm::sys::getDefaultTargetTriple() << "\n";
- if (getFrontendOpts().ShowTimers)
+ if (getCodeGenOpts().TimePasses)
createFrontendTimer();
if (getFrontendOpts().ShowStats || !getFrontendOpts().StatsFile.empty())
@@ -1175,10 +1140,8 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
diag::remark_module_build_done)
<< ModuleName;
- // Delete the temporary module map file.
- // FIXME: Even though we're executing under crash protection, it would still
- // be nice to do this with RemoveFileOnSignal when we can. However, that
- // doesn't make sense for all clients, so clean this up manually.
+ // Delete any remaining temporary files related to Instance, in case the
+ // module generation thread crashed.
Instance.clearOutputFiles(/*EraseFiles=*/true);
return !Instance.getDiagnostics().hasErrorOccurred();
@@ -1516,7 +1479,9 @@ void CompilerInstance::createASTReader() {
HeaderSearchOptions &HSOpts = getHeaderSearchOpts();
std::string Sysroot = HSOpts.Sysroot;
const PreprocessorOptions &PPOpts = getPreprocessorOpts();
+ const FrontendOptions &FEOpts = getFrontendOpts();
std::unique_ptr<llvm::Timer> ReadTimer;
+
if (FrontendTimerGroup)
ReadTimer = std::make_unique<llvm::Timer>("reading_modules",
"Reading modules",
@@ -1524,8 +1489,9 @@ void CompilerInstance::createASTReader() {
TheASTReader = new ASTReader(
getPreprocessor(), getModuleCache(), &getASTContext(),
getPCHContainerReader(), getFrontendOpts().ModuleFileExtensions,
- Sysroot.empty() ? "" : Sysroot.c_str(), PPOpts.DisablePCHValidation,
- /*AllowASTWithCompilerErrors=*/false,
+ Sysroot.empty() ? "" : Sysroot.c_str(),
+ PPOpts.DisablePCHOrModuleValidation,
+ /*AllowASTWithCompilerErrors=*/FEOpts.AllowPCMWithCompilerErrors,
/*AllowConfigurationMismatch=*/false, HSOpts.ModulesValidateSystemHeaders,
HSOpts.ValidateASTInputFilesContent,
getFrontendOpts().UseGlobalModuleIndex, std::move(ReadTimer));
@@ -1668,6 +1634,8 @@ static ModuleSource selectModuleSource(
if (!HSOpts.PrebuiltModuleFiles.empty() ||
!HSOpts.PrebuiltModulePaths.empty()) {
ModuleFilename = HS.getPrebuiltModuleFileName(ModuleName);
+ if (HSOpts.EnablePrebuiltImplicitModules && ModuleFilename.empty())
+ ModuleFilename = HS.getPrebuiltImplicitModuleFileName(M);
if (!ModuleFilename.empty())
return MS_PrebuiltModulePath;
}
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 75d7cf5d26d3..d8be4ea14868 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -14,6 +14,7 @@
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticDriver.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Basic/LLVM.h"
@@ -57,6 +58,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Linker/Linker.h"
#include "llvm/MC/MCTargetOptions.h"
@@ -66,6 +68,7 @@
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/Remarks/HotnessThresholdParser.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
@@ -90,6 +93,7 @@
#include <memory>
#include <string>
#include <tuple>
+#include <type_traits>
#include <utility>
#include <vector>
@@ -125,10 +129,126 @@ CompilerInvocationBase::~CompilerInvocationBase() = default;
#include "clang/Driver/Options.inc"
#undef SIMPLE_ENUM_VALUE_TABLE
-static llvm::Optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
- unsigned TableIndex,
- const ArgList &Args,
- DiagnosticsEngine &Diags) {
+static llvm::Optional<bool>
+normalizeSimpleFlag(OptSpecifier Opt, unsigned TableIndex, const ArgList &Args,
+ DiagnosticsEngine &Diags, bool &Success) {
+ if (Args.hasArg(Opt))
+ return true;
+ return None;
+}
+
+static Optional<bool> normalizeSimpleNegativeFlag(OptSpecifier Opt, unsigned,
+ const ArgList &Args,
+ DiagnosticsEngine &,
+ bool &Success) {
+ if (Args.hasArg(Opt))
+ return false;
+ return None;
+}
+
+/// The tblgen-erated code passes in a fifth parameter of an arbitrary type, but
+/// denormalizeSimpleFlag never looks at it. Avoid bloating compile time with
+/// unnecessary template instantiations and just ignore it with a variadic
+/// argument.
+static void denormalizeSimpleFlag(SmallVectorImpl<const char *> &Args,
+ const char *Spelling,
+ CompilerInvocation::StringAllocator,
+ Option::OptionClass, unsigned, /*T*/...) {
+ Args.push_back(Spelling);
+}
+
+template <typename T> static constexpr bool is_uint64_t_convertible() {
+ return !std::is_same<T, uint64_t>::value &&
+ llvm::is_integral_or_enum<T>::value;
+}
+
+template <typename T,
+ std::enable_if_t<!is_uint64_t_convertible<T>(), bool> = false>
+static auto makeFlagToValueNormalizer(T Value) {
+ return [Value](OptSpecifier Opt, unsigned, const ArgList &Args,
+ DiagnosticsEngine &, bool &Success) -> Optional<T> {
+ if (Args.hasArg(Opt))
+ return Value;
+ return None;
+ };
+}
+
+template <typename T,
+ std::enable_if_t<is_uint64_t_convertible<T>(), bool> = false>
+static auto makeFlagToValueNormalizer(T Value) {
+ return makeFlagToValueNormalizer(uint64_t(Value));
+}
+
+static auto makeBooleanOptionNormalizer(bool Value, bool OtherValue,
+ OptSpecifier OtherOpt) {
+ return [Value, OtherValue, OtherOpt](OptSpecifier Opt, unsigned,
+ const ArgList &Args, DiagnosticsEngine &,
+ bool &Success) -> Optional<bool> {
+ if (const Arg *A = Args.getLastArg(Opt, OtherOpt)) {
+ return A->getOption().matches(Opt) ? Value : OtherValue;
+ }
+ return None;
+ };
+}
+
+static auto makeBooleanOptionDenormalizer(bool Value) {
+ return [Value](SmallVectorImpl<const char *> &Args, const char *Spelling,
+ CompilerInvocation::StringAllocator, Option::OptionClass,
+ unsigned, bool KeyPath) {
+ if (KeyPath == Value)
+ Args.push_back(Spelling);
+ };
+}
+
+static void denormalizeStringImpl(SmallVectorImpl<const char *> &Args,
+ const char *Spelling,
+ CompilerInvocation::StringAllocator SA,
+ Option::OptionClass OptClass, unsigned,
+ Twine Value) {
+ switch (OptClass) {
+ case Option::SeparateClass:
+ case Option::JoinedOrSeparateClass:
+ Args.push_back(Spelling);
+ Args.push_back(SA(Value));
+ break;
+ case Option::JoinedClass:
+ Args.push_back(SA(Twine(Spelling) + Value));
+ break;
+ default:
+ llvm_unreachable("Cannot denormalize an option with option class "
+ "incompatible with string denormalization.");
+ }
+}
+
+template <typename T>
+static void
+denormalizeString(SmallVectorImpl<const char *> &Args, const char *Spelling,
+ CompilerInvocation::StringAllocator SA,
+ Option::OptionClass OptClass, unsigned TableIndex, T Value) {
+ denormalizeStringImpl(Args, Spelling, SA, OptClass, TableIndex, Twine(Value));
+}
+
+static Optional<SimpleEnumValue>
+findValueTableByName(const SimpleEnumValueTable &Table, StringRef Name) {
+ for (int I = 0, E = Table.Size; I != E; ++I)
+ if (Name == Table.Table[I].Name)
+ return Table.Table[I];
+
+ return None;
+}
+
+static Optional<SimpleEnumValue>
+findValueTableByValue(const SimpleEnumValueTable &Table, unsigned Value) {
+ for (int I = 0, E = Table.Size; I != E; ++I)
+ if (Value == Table.Table[I].Value)
+ return Table.Table[I];
+
+ return None;
+}
+
+static llvm::Optional<unsigned>
+normalizeSimpleEnum(OptSpecifier Opt, unsigned TableIndex, const ArgList &Args,
+ DiagnosticsEngine &Diags, bool &Success) {
assert(TableIndex < SimpleEnumValueTablesSize);
const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
@@ -137,42 +257,229 @@ static llvm::Optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
return None;
StringRef ArgValue = Arg->getValue();
- for (int I = 0, E = Table.Size; I != E; ++I)
- if (ArgValue == Table.Table[I].Name)
- return Table.Table[I].Value;
+ if (auto MaybeEnumVal = findValueTableByName(Table, ArgValue))
+ return MaybeEnumVal->Value;
+ Success = false;
Diags.Report(diag::err_drv_invalid_value)
<< Arg->getAsString(Args) << ArgValue;
return None;
}
-static const char *denormalizeSimpleEnum(CompilerInvocation::StringAllocator SA,
- unsigned TableIndex, unsigned Value) {
+static void denormalizeSimpleEnumImpl(SmallVectorImpl<const char *> &Args,
+ const char *Spelling,
+ CompilerInvocation::StringAllocator SA,
+ Option::OptionClass OptClass,
+ unsigned TableIndex, unsigned Value) {
assert(TableIndex < SimpleEnumValueTablesSize);
const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
- for (int I = 0, E = Table.Size; I != E; ++I)
- if (Value == Table.Table[I].Value)
- return Table.Table[I].Name;
+ if (auto MaybeEnumVal = findValueTableByValue(Table, Value)) {
+ denormalizeString(Args, Spelling, SA, OptClass, TableIndex,
+ MaybeEnumVal->Name);
+ } else {
+ llvm_unreachable("The simple enum value was not correctly defined in "
+ "the tablegen option description");
+ }
+}
+
+template <typename T>
+static void denormalizeSimpleEnum(SmallVectorImpl<const char *> &Args,
+ const char *Spelling,
+ CompilerInvocation::StringAllocator SA,
+ Option::OptionClass OptClass,
+ unsigned TableIndex, T Value) {
+ return denormalizeSimpleEnumImpl(Args, Spelling, SA, OptClass, TableIndex,
+ static_cast<unsigned>(Value));
+}
+
+static Optional<std::string> normalizeString(OptSpecifier Opt, int TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags,
+ bool &Success) {
+ auto *Arg = Args.getLastArg(Opt);
+ if (!Arg)
+ return None;
+ return std::string(Arg->getValue());
+}
+
+template <typename IntTy>
+static Optional<IntTy>
+normalizeStringIntegral(OptSpecifier Opt, int, const ArgList &Args,
+ DiagnosticsEngine &Diags, bool &Success) {
+ auto *Arg = Args.getLastArg(Opt);
+ if (!Arg)
+ return None;
+ IntTy Res;
+ if (StringRef(Arg->getValue()).getAsInteger(0, Res)) {
+ Success = false;
+ Diags.Report(diag::err_drv_invalid_int_value)
+ << Arg->getAsString(Args) << Arg->getValue();
+ return None;
+ }
+ return Res;
+}
- llvm_unreachable("The simple enum value was not correctly defined in "
- "the tablegen option description");
+static Optional<std::vector<std::string>>
+normalizeStringVector(OptSpecifier Opt, int, const ArgList &Args,
+ DiagnosticsEngine &, bool &Success) {
+ return Args.getAllArgValues(Opt);
}
-static const char *denormalizeString(CompilerInvocation::StringAllocator SA,
- unsigned TableIndex,
- const std::string &Value) {
- return SA(Value);
+static void denormalizeStringVector(SmallVectorImpl<const char *> &Args,
+ const char *Spelling,
+ CompilerInvocation::StringAllocator SA,
+ Option::OptionClass OptClass,
+ unsigned TableIndex,
+ const std::vector<std::string> &Values) {
+ switch (OptClass) {
+ case Option::CommaJoinedClass: {
+ std::string CommaJoinedValue;
+ if (!Values.empty()) {
+ CommaJoinedValue.append(Values.front());
+ for (const std::string &Value : llvm::drop_begin(Values, 1)) {
+ CommaJoinedValue.append(",");
+ CommaJoinedValue.append(Value);
+ }
+ }
+ denormalizeString(Args, Spelling, SA, Option::OptionClass::JoinedClass,
+ TableIndex, CommaJoinedValue);
+ break;
+ }
+ case Option::JoinedClass:
+ case Option::SeparateClass:
+ case Option::JoinedOrSeparateClass:
+ for (const std::string &Value : Values)
+ denormalizeString(Args, Spelling, SA, OptClass, TableIndex, Value);
+ break;
+ default:
+ llvm_unreachable("Cannot denormalize an option with option class "
+ "incompatible with string vector denormalization.");
+ }
}
static Optional<std::string> normalizeTriple(OptSpecifier Opt, int TableIndex,
const ArgList &Args,
- DiagnosticsEngine &Diags) {
+ DiagnosticsEngine &Diags,
+ bool &Success) {
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
return None;
return llvm::Triple::normalize(Arg->getValue());
}
+template <typename T, typename U>
+static T mergeForwardValue(T KeyPath, U Value) {
+ return static_cast<T>(Value);
+}
+
+template <typename T, typename U> static T mergeMaskValue(T KeyPath, U Value) {
+ return KeyPath | Value;
+}
+
+template <typename T> static T extractForwardValue(T KeyPath) {
+ return KeyPath;
+}
+
+template <typename T, typename U, U Value>
+static T extractMaskValue(T KeyPath) {
+ return KeyPath & Value;
+}
+
+#define PARSE_OPTION_WITH_MARSHALLING(ARGS, DIAGS, SUCCESS, ID, FLAGS, PARAM, \
+ SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, \
+ NORMALIZER, MERGER, TABLE_INDEX) \
+ if ((FLAGS)&options::CC1Option) { \
+ KEYPATH = MERGER(KEYPATH, DEFAULT_VALUE); \
+ if (IMPLIED_CHECK) \
+ KEYPATH = MERGER(KEYPATH, IMPLIED_VALUE); \
+ if (SHOULD_PARSE) \
+ if (auto MaybeValue = \
+ NORMALIZER(OPT_##ID, TABLE_INDEX, ARGS, DIAGS, SUCCESS)) \
+ KEYPATH = \
+ MERGER(KEYPATH, static_cast<decltype(KEYPATH)>(*MaybeValue)); \
+ }
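// How a single Options.inc record drives the macro above, sketched for a
// hypothetical boolean flag -ffoo with key path LangOpts->Foo (the flag, key
// path, and normalizer name are assumptions for illustration):
//
//   if (Flags & options::CC1Option) {                       // FLAGS check
//     LangOpts->Foo = mergeForwardValue(LangOpts->Foo, /*DEFAULT_VALUE=*/false);
//     if (/*IMPLIED_CHECK=*/false)
//       LangOpts->Foo = mergeForwardValue(LangOpts->Foo, /*IMPLIED_VALUE=*/true);
//     if (/*SHOULD_PARSE=*/true)
//       if (auto MaybeValue = normalizeSimpleFlag(OPT_ffoo, /*TABLE_INDEX=*/0,
//                                                 Args, Diags, Success))
//         LangOpts->Foo = mergeForwardValue(LangOpts->Foo, *MaybeValue);
//   }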
+
+static const StringRef GetInputKindName(InputKind IK);
+
+static void FixupInvocation(CompilerInvocation &Invocation,
+ DiagnosticsEngine &Diags, const InputArgList &Args,
+ InputKind IK) {
+ LangOptions &LangOpts = *Invocation.getLangOpts();
+ CodeGenOptions &CodeGenOpts = Invocation.getCodeGenOpts();
+ TargetOptions &TargetOpts = Invocation.getTargetOpts();
+ FrontendOptions &FrontendOpts = Invocation.getFrontendOpts();
+ CodeGenOpts.XRayInstrumentFunctions = LangOpts.XRayInstrument;
+ CodeGenOpts.XRayAlwaysEmitCustomEvents = LangOpts.XRayAlwaysEmitCustomEvents;
+ CodeGenOpts.XRayAlwaysEmitTypedEvents = LangOpts.XRayAlwaysEmitTypedEvents;
+ CodeGenOpts.DisableFree = FrontendOpts.DisableFree;
+ FrontendOpts.GenerateGlobalModuleIndex = FrontendOpts.UseGlobalModuleIndex;
+
+ LangOpts.ForceEmitVTables = CodeGenOpts.ForceEmitVTables;
+ LangOpts.SpeculativeLoadHardening = CodeGenOpts.SpeculativeLoadHardening;
+ LangOpts.CurrentModule = LangOpts.ModuleName;
+
+ llvm::Triple T(TargetOpts.Triple);
+ llvm::Triple::ArchType Arch = T.getArch();
+
+ CodeGenOpts.CodeModel = TargetOpts.CodeModel;
+
+ if (LangOpts.getExceptionHandling() != llvm::ExceptionHandling::None &&
+ T.isWindowsMSVCEnvironment())
+ Diags.Report(diag::err_fe_invalid_exception_model)
+ << static_cast<unsigned>(LangOpts.getExceptionHandling()) << T.str();
+
+ if (LangOpts.AppleKext && !LangOpts.CPlusPlus)
+ Diags.Report(diag::warn_c_kext);
+
+ if (LangOpts.NewAlignOverride &&
+ !llvm::isPowerOf2_32(LangOpts.NewAlignOverride)) {
+ Arg *A = Args.getLastArg(OPT_fnew_alignment_EQ);
+ Diags.Report(diag::err_fe_invalid_alignment)
+ << A->getAsString(Args) << A->getValue();
+ LangOpts.NewAlignOverride = 0;
+ }
+
+ if (Args.hasArg(OPT_fgnu89_inline) && LangOpts.CPlusPlus)
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << "-fgnu89-inline" << GetInputKindName(IK);
+
+ if (Args.hasArg(OPT_fgpu_allow_device_init) && !LangOpts.HIP)
+ Diags.Report(diag::warn_ignored_hip_only_option)
+ << Args.getLastArg(OPT_fgpu_allow_device_init)->getAsString(Args);
+
+ if (Args.hasArg(OPT_gpu_max_threads_per_block_EQ) && !LangOpts.HIP)
+ Diags.Report(diag::warn_ignored_hip_only_option)
+ << Args.getLastArg(OPT_gpu_max_threads_per_block_EQ)->getAsString(Args);
+
+ // -cl-strict-aliasing needs to emit a diagnostic when CL > 1.0: the option
+ // was added only for compatibility with OpenCL 1.0 and should be treated as
+ // deprecated for any later OpenCL version.
+ if (Args.getLastArg(OPT_cl_strict_aliasing) && LangOpts.OpenCLVersion > 100)
+ Diags.Report(diag::warn_option_invalid_ocl_version)
+ << LangOpts.getOpenCLVersionTuple().getAsString()
+ << Args.getLastArg(OPT_cl_strict_aliasing)->getAsString(Args);
+
+ if (Arg *A = Args.getLastArg(OPT_fdefault_calling_conv_EQ)) {
+ auto DefaultCC = LangOpts.getDefaultCallingConv();
+
+ bool emitError = (DefaultCC == LangOptions::DCC_FastCall ||
+ DefaultCC == LangOptions::DCC_StdCall) &&
+ Arch != llvm::Triple::x86;
+ emitError |= (DefaultCC == LangOptions::DCC_VectorCall ||
+ DefaultCC == LangOptions::DCC_RegCall) &&
+ !T.isX86();
+ if (emitError)
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getSpelling() << T.getTriple();
+ }
+
+ if (!CodeGenOpts.ProfileRemappingFile.empty() && CodeGenOpts.LegacyPassManager)
+ Diags.Report(diag::err_drv_argument_only_allowed_with)
+ << Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
+ << "-fno-legacy-pass-manager";
+}
+
//===----------------------------------------------------------------------===//
// Deserialization (from args)
//===----------------------------------------------------------------------===//
@@ -221,6 +528,11 @@ static unsigned getOptimizationLevelSize(ArgList &Args) {
return 0;
}
+static std::string GetOptName(llvm::opt::OptSpecifier OptSpecifier) {
+ static const OptTable &OptTable = getDriverOptTable();
+ return OptTable.getOption(OptSpecifier).getPrefixedName();
+}
+
static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
OptSpecifier GroupWithValue,
std::vector<std::string> &Diagnostics) {
@@ -344,48 +656,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
}
- Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
- Opts.ShowCheckerHelpAlpha = Args.hasArg(OPT_analyzer_checker_help_alpha);
- Opts.ShowCheckerHelpDeveloper =
- Args.hasArg(OPT_analyzer_checker_help_developer);
-
- Opts.ShowCheckerOptionList = Args.hasArg(OPT_analyzer_checker_option_help);
- Opts.ShowCheckerOptionAlphaList =
- Args.hasArg(OPT_analyzer_checker_option_help_alpha);
- Opts.ShowCheckerOptionDeveloperList =
- Args.hasArg(OPT_analyzer_checker_option_help_developer);
-
- Opts.ShowConfigOptionsList = Args.hasArg(OPT_analyzer_config_help);
- Opts.ShowEnabledCheckerList = Args.hasArg(OPT_analyzer_list_enabled_checkers);
- Opts.ShouldEmitErrorsOnInvalidConfigValue =
- /* negated */!llvm::StringSwitch<bool>(
- Args.getLastArgValue(OPT_analyzer_config_compatibility_mode))
- .Case("true", true)
- .Case("false", false)
- .Default(false);
- Opts.DisableAllCheckers = Args.hasArg(OPT_analyzer_disable_all_checks);
-
- Opts.visualizeExplodedGraphWithGraphViz =
- Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
- Opts.DumpExplodedGraphTo =
- std::string(Args.getLastArgValue(OPT_analyzer_dump_egraph));
- Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
- Opts.AnalyzerWerror = Args.hasArg(OPT_analyzer_werror);
- Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
- Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
- Opts.AnalyzeNestedBlocks =
- Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
- Opts.AnalyzeSpecificFunction =
- std::string(Args.getLastArgValue(OPT_analyze_function));
- Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
- Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
- Opts.maxBlockVisitOnPath =
- getLastArgIntValue(Args, OPT_analyzer_max_loop, 4, Diags);
- Opts.PrintStats = Args.hasArg(OPT_analyzer_stats);
- Opts.InlineMaxStackDepth =
- getLastArgIntValue(Args, OPT_analyzer_inline_max_stack_depth,
- Opts.InlineMaxStackDepth, Diags);
-
Opts.CheckersAndPackages.clear();
for (const Arg *A :
Args.filtered(OPT_analyzer_checker, OPT_analyzer_disable_checker)) {
@@ -573,17 +843,6 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
<< "a filename";
}
-static bool ParseMigratorArgs(MigratorOptions &Opts, ArgList &Args) {
- Opts.NoNSAllocReallocError = Args.hasArg(OPT_migrator_no_nsalloc_error);
- Opts.NoFinalizeRemoval = Args.hasArg(OPT_migrator_no_finalize_removal);
- return true;
-}
-
-static void ParseCommentArgs(CommentOptions &Opts, ArgList &Args) {
- Opts.BlockCommandNames = Args.getAllArgValues(OPT_fcomment_block_commands);
- Opts.ParseAllComments = Args.hasArg(OPT_fparse_all_comments);
-}
-
/// Create a new Regex instance out of the string value in \p RpassArg.
/// It returns a pointer to the newly generated Regex instance.
static std::shared_ptr<llvm::Regex>
@@ -602,7 +861,7 @@ GenerateOptimizationRemarkRegex(DiagnosticsEngine &Diags, ArgList &Args,
static bool parseDiagnosticLevelMask(StringRef FlagName,
const std::vector<std::string> &Levels,
- DiagnosticsEngine *Diags,
+ DiagnosticsEngine &Diags,
DiagnosticLevelMask &M) {
bool Success = true;
for (const auto &Level : Levels) {
@@ -615,8 +874,7 @@ static bool parseDiagnosticLevelMask(StringRef FlagName,
.Default(DiagnosticLevelMask::None);
if (PM == DiagnosticLevelMask::None) {
Success = false;
- if (Diags)
- Diags->Report(diag::err_drv_invalid_value) << FlagName << Level;
+ Diags.Report(diag::err_drv_invalid_value) << FlagName << Level;
}
M = M | PM;
}
@@ -654,28 +912,6 @@ static void parseXRayInstrumentationBundle(StringRef FlagName, StringRef Bundle,
}
}
-// Set the profile kind for fprofile-instrument.
-static void setPGOInstrumentor(CodeGenOptions &Opts, ArgList &Args,
- DiagnosticsEngine &Diags) {
- Arg *A = Args.getLastArg(OPT_fprofile_instrument_EQ);
- if (A == nullptr)
- return;
- StringRef S = A->getValue();
- unsigned I = llvm::StringSwitch<unsigned>(S)
- .Case("none", CodeGenOptions::ProfileNone)
- .Case("clang", CodeGenOptions::ProfileClangInstr)
- .Case("llvm", CodeGenOptions::ProfileIRInstr)
- .Case("csllvm", CodeGenOptions::ProfileCSIRInstr)
- .Default(~0U);
- if (I == ~0U) {
- Diags.Report(diag::err_drv_invalid_pgo_instrumentor) << A->getAsString(Args)
- << S;
- return;
- }
- auto Instrumentor = static_cast<CodeGenOptions::ProfileInstrKind>(I);
- Opts.setProfileInstr(Instrumentor);
-}
-
// Set the profile kind using fprofile-instrument-use-path.
static void setPGOUseInstrumentor(CodeGenOptions &Opts,
const Twine &ProfileName) {
@@ -697,12 +933,13 @@ static void setPGOUseInstrumentor(CodeGenOptions &Opts,
Opts.setProfileUse(CodeGenOptions::ProfileClangInstr);
}
-static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
- DiagnosticsEngine &Diags,
- const TargetOptions &TargetOpts,
- const FrontendOptions &FrontendOpts) {
+bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
+ InputKind IK,
+ DiagnosticsEngine &Diags,
+ const llvm::Triple &T,
+ const std::string &OutputFile,
+ const LangOptions &LangOptsRef) {
bool Success = true;
- llvm::Triple Triple = llvm::Triple(TargetOpts.Triple);
unsigned OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
// TODO: This could be done in Driver
@@ -716,6 +953,25 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.OptimizationLevel = OptimizationLevel;
+ // The key paths of codegen options defined in Options.td start with
+ // "CodeGenOpts.". Let's provide the expected variable name and type.
+ CodeGenOptions &CodeGenOpts = Opts;
+ // Some codegen options depend on language options. Let's provide the expected
+ // variable name and type.
+ const LangOptions *LangOpts = &LangOptsRef;
+
+#define CODEGEN_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, Success, ID, FLAGS, PARAM, \
+ SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
+ MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef CODEGEN_OPTION_WITH_MARSHALLING
+
// At O0 we want to fully disable inlining outside of cases marked with
// 'alwaysinline' that are required for correctness.
Opts.setInlining((Opts.OptimizationLevel == 0)
@@ -737,73 +993,18 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
- Opts.ExperimentalNewPassManager = Args.hasFlag(
- OPT_fexperimental_new_pass_manager, OPT_fno_experimental_new_pass_manager,
- /* Default */ ENABLE_EXPERIMENTAL_NEW_PASS_MANAGER);
-
- Opts.DebugPassManager =
- Args.hasFlag(OPT_fdebug_pass_manager, OPT_fno_debug_pass_manager,
- /* Default */ false);
+ // PIC defaults to -fno-direct-access-external-data while non-PIC defaults to
+ // -fdirect-access-external-data.
+ Opts.DirectAccessExternalData =
+ Args.hasArg(OPT_fdirect_access_external_data) ||
+ (!Args.hasArg(OPT_fno_direct_access_external_data) &&
+ getLastArgIntValue(Args, OPT_pic_level, 0, Diags) == 0);
- if (Arg *A = Args.getLastArg(OPT_fveclib)) {
- StringRef Name = A->getValue();
- if (Name == "Accelerate")
- Opts.setVecLib(CodeGenOptions::Accelerate);
- else if (Name == "MASSV")
- Opts.setVecLib(CodeGenOptions::MASSV);
- else if (Name == "SVML")
- Opts.setVecLib(CodeGenOptions::SVML);
- else if (Name == "none")
- Opts.setVecLib(CodeGenOptions::NoLibrary);
- else
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- }
-
- if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
- unsigned Val =
- llvm::StringSwitch<unsigned>(A->getValue())
- .Case("line-tables-only", codegenoptions::DebugLineTablesOnly)
- .Case("line-directives-only", codegenoptions::DebugDirectivesOnly)
- .Case("constructor", codegenoptions::DebugInfoConstructor)
- .Case("limited", codegenoptions::LimitedDebugInfo)
- .Case("standalone", codegenoptions::FullDebugInfo)
- .Default(~0U);
- if (Val == ~0U)
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
- << A->getValue();
- else
- Opts.setDebugInfo(static_cast<codegenoptions::DebugInfoKind>(Val));
- }
- if (Arg *A = Args.getLastArg(OPT_debugger_tuning_EQ)) {
- unsigned Val = llvm::StringSwitch<unsigned>(A->getValue())
- .Case("gdb", unsigned(llvm::DebuggerKind::GDB))
- .Case("lldb", unsigned(llvm::DebuggerKind::LLDB))
- .Case("sce", unsigned(llvm::DebuggerKind::SCE))
- .Default(~0U);
- if (Val == ~0U)
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
- << A->getValue();
- else
- Opts.setDebuggerTuning(static_cast<llvm::DebuggerKind>(Val));
- }
- Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
- Opts.DebugColumnInfo = !Args.hasArg(OPT_gno_column_info);
- Opts.EmitCodeView = Args.hasArg(OPT_gcodeview);
- Opts.CodeViewGHash = Args.hasArg(OPT_gcodeview_ghash);
- Opts.MacroDebugInfo = Args.hasArg(OPT_debug_info_macro);
- Opts.WholeProgramVTables = Args.hasArg(OPT_fwhole_program_vtables);
- Opts.VirtualFunctionElimination =
- Args.hasArg(OPT_fvirtual_function_elimination);
- Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
- Opts.SplitDwarfFile = std::string(Args.getLastArgValue(OPT_split_dwarf_file));
- Opts.SplitDwarfOutput =
- std::string(Args.getLastArgValue(OPT_split_dwarf_output));
- Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
- Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
- Opts.DebugExplicitImport = Args.hasArg(OPT_dwarf_explicit_import);
- Opts.DebugFwdTemplateParams = Args.hasArg(OPT_debug_forward_template_params);
- Opts.EmbedSource = Args.hasArg(OPT_gembed_source);
- Opts.ForceDwarfFrameSection = Args.hasArg(OPT_fforce_dwarf_frame);
+ // If -fuse-ctor-homing is set and limited debug info is already on, then use
+ // constructor homing.
+ if (Args.getLastArg(OPT_fuse_ctor_homing))
+ if (Opts.getDebugInfo() == codegenoptions::LimitedDebugInfo)
+ Opts.setDebugInfo(codegenoptions::DebugInfoConstructor);
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
@@ -811,44 +1012,23 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
{std::string(Split.first), std::string(Split.second)});
}
- if (const Arg *A =
- Args.getLastArg(OPT_emit_llvm_uselists, OPT_no_emit_llvm_uselists))
- Opts.EmitLLVMUseLists = A->getOption().getID() == OPT_emit_llvm_uselists;
-
- Opts.DisableLLVMPasses = Args.hasArg(OPT_disable_llvm_passes);
- Opts.DisableLifetimeMarkers = Args.hasArg(OPT_disable_lifetimemarkers);
+ for (const auto &Arg : Args.getAllArgValues(OPT_fprofile_prefix_map_EQ)) {
+ auto Split = StringRef(Arg).split('=');
+ Opts.ProfilePrefixMap.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
const llvm::Triple::ArchType DebugEntryValueArchs[] = {
llvm::Triple::x86, llvm::Triple::x86_64, llvm::Triple::aarch64,
llvm::Triple::arm, llvm::Triple::armeb, llvm::Triple::mips,
llvm::Triple::mipsel, llvm::Triple::mips64, llvm::Triple::mips64el};
- llvm::Triple T(TargetOpts.Triple);
if (Opts.OptimizationLevel > 0 && Opts.hasReducedDebugInfo() &&
llvm::is_contained(DebugEntryValueArchs, T.getArch()))
Opts.EmitCallSiteInfo = true;
- Opts.DisableO0ImplyOptNone = Args.hasArg(OPT_disable_O0_optnone);
- Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
- Opts.IndirectTlsSegRefs = Args.hasArg(OPT_mno_tls_direct_seg_refs);
- Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
- Opts.UseRegisterSizedBitfieldAccess = Args.hasArg(
- OPT_fuse_register_sized_bitfield_access);
- Opts.RelaxedAliasing = Args.hasArg(OPT_relaxed_aliasing);
- Opts.StructPathTBAA = !Args.hasArg(OPT_no_struct_path_tbaa);
Opts.NewStructPathTBAA = !Args.hasArg(OPT_no_struct_path_tbaa) &&
Args.hasArg(OPT_new_struct_path_tbaa);
- Opts.FineGrainedBitfieldAccesses =
- Args.hasFlag(OPT_ffine_grained_bitfield_accesses,
- OPT_fno_fine_grained_bitfield_accesses, false);
- Opts.DwarfDebugFlags =
- std::string(Args.getLastArgValue(OPT_dwarf_debug_flags));
- Opts.RecordCommandLine =
- std::string(Args.getLastArgValue(OPT_record_command_line));
- Opts.MergeAllConstants = Args.hasArg(OPT_fmerge_all_constants);
- Opts.NoCommon = !Args.hasArg(OPT_fcommon);
- Opts.NoInlineLineTables = Args.hasArg(OPT_gno_inline_line_tables);
- Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
Opts.OptimizeSize = getOptimizationLevelSize(Args);
Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
Args.hasArg(OPT_ffreestanding));
@@ -857,145 +1037,45 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.UnrollLoops =
Args.hasFlag(OPT_funroll_loops, OPT_fno_unroll_loops,
(Opts.OptimizationLevel > 1));
- Opts.RerollLoops = Args.hasArg(OPT_freroll_loops);
-
- Opts.DisableIntegratedAS = Args.hasArg(OPT_fno_integrated_as);
- Opts.Autolink = !Args.hasArg(OPT_fno_autolink);
- Opts.SampleProfileFile =
- std::string(Args.getLastArgValue(OPT_fprofile_sample_use_EQ));
- Opts.DebugInfoForProfiling = Args.hasFlag(
- OPT_fdebug_info_for_profiling, OPT_fno_debug_info_for_profiling, false);
+
+ Opts.BinutilsVersion =
+ std::string(Args.getLastArgValue(OPT_fbinutils_version_EQ));
+
Opts.DebugNameTable = static_cast<unsigned>(
Args.hasArg(OPT_ggnu_pubnames)
? llvm::DICompileUnit::DebugNameTableKind::GNU
: Args.hasArg(OPT_gpubnames)
? llvm::DICompileUnit::DebugNameTableKind::Default
: llvm::DICompileUnit::DebugNameTableKind::None);
- Opts.DebugRangesBaseAddress = Args.hasArg(OPT_fdebug_ranges_base_address);
- setPGOInstrumentor(Opts, Args, Diags);
- Opts.InstrProfileOutput =
- std::string(Args.getLastArgValue(OPT_fprofile_instrument_path_EQ));
- Opts.ProfileInstrumentUsePath =
- std::string(Args.getLastArgValue(OPT_fprofile_instrument_use_path_EQ));
if (!Opts.ProfileInstrumentUsePath.empty())
setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath);
- Opts.ProfileRemappingFile =
- std::string(Args.getLastArgValue(OPT_fprofile_remapping_file_EQ));
- if (!Opts.ProfileRemappingFile.empty() && !Opts.ExperimentalNewPassManager) {
- Diags.Report(diag::err_drv_argument_only_allowed_with)
- << Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
- << "-fexperimental-new-pass-manager";
- }
-
- Opts.CoverageMapping =
- Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
- Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
- Opts.AsmVerbose = !Args.hasArg(OPT_fno_verbose_asm);
- Opts.PreserveAsmComments = !Args.hasArg(OPT_fno_preserve_as_comments);
- Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
- Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
- Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit);
- Opts.RegisterGlobalDtorsWithAtExit =
- Args.hasArg(OPT_fregister_global_dtors_with_atexit);
- Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
- Opts.CodeModel = TargetOpts.CodeModel;
- Opts.DebugPass = std::string(Args.getLastArgValue(OPT_mdebug_pass));
-
- // Handle -mframe-pointer option.
- if (Arg *A = Args.getLastArg(OPT_mframe_pointer_EQ)) {
- CodeGenOptions::FramePointerKind FP;
- StringRef Name = A->getValue();
- bool ValidFP = true;
- if (Name == "none")
- FP = CodeGenOptions::FramePointerKind::None;
- else if (Name == "non-leaf")
- FP = CodeGenOptions::FramePointerKind::NonLeaf;
- else if (Name == "all")
- FP = CodeGenOptions::FramePointerKind::All;
- else {
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- Success = false;
- ValidFP = false;
- }
- if (ValidFP)
- Opts.setFramePointer(FP);
- }
-
- Opts.DisableFree = Args.hasArg(OPT_disable_free);
- Opts.DiscardValueNames = Args.hasArg(OPT_discard_value_names);
- Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
- Opts.NoEscapingBlockTailCalls =
- Args.hasArg(OPT_fno_escaping_block_tail_calls);
- Opts.FloatABI = std::string(Args.getLastArgValue(OPT_mfloat_abi));
- Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.LimitFloatPrecision =
- std::string(Args.getLastArgValue(OPT_mlimit_float_precision));
- Opts.CorrectlyRoundedDivSqrt =
- Args.hasArg(OPT_cl_fp32_correctly_rounded_divide_sqrt);
- Opts.UniformWGSize =
- Args.hasArg(OPT_cl_uniform_work_group_size);
- Opts.Reciprocals = Args.getAllArgValues(OPT_mrecip_EQ);
- Opts.StrictFloatCastOverflow =
- !Args.hasArg(OPT_fno_strict_float_cast_overflow);
-
- Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_fno_zero_initialized_in_bss);
- Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags);
- Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
- Opts.SmallDataLimit =
- getLastArgIntValue(Args, OPT_msmall_data_limit, 0, Diags);
- Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
- Opts.NoWarn = Args.hasArg(OPT_massembler_no_warn);
- Opts.EnableSegmentedStacks = Args.hasArg(OPT_split_stacks);
- Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
- Opts.IncrementalLinkerCompatible =
- Args.hasArg(OPT_mincremental_linker_compatible);
- Opts.PIECopyRelocations =
- Args.hasArg(OPT_mpie_copy_relocations);
- Opts.NoPLT = Args.hasArg(OPT_fno_plt);
- Opts.SaveTempLabels = Args.hasArg(OPT_msave_temp_labels);
- Opts.NoDwarfDirectoryAsm = Args.hasArg(OPT_fno_dwarf_directory_asm);
- Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
- Opts.StrictEnums = Args.hasArg(OPT_fstrict_enums);
- Opts.StrictReturn = !Args.hasArg(OPT_fno_strict_return);
- Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
- Opts.ForceEmitVTables = Args.hasArg(OPT_fforce_emit_vtables);
- Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
- Opts.ThreadModel =
- std::string(Args.getLastArgValue(OPT_mthread_model, "posix"));
- if (Opts.ThreadModel != "posix" && Opts.ThreadModel != "single")
- Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_mthread_model)->getAsString(Args)
- << Opts.ThreadModel;
- Opts.TrapFuncName = std::string(Args.getLastArgValue(OPT_ftrap_function_EQ));
- Opts.UseInitArray = !Args.hasArg(OPT_fno_use_init_array);
- Opts.BBSections =
- std::string(Args.getLastArgValue(OPT_fbasic_block_sections_EQ, "none"));
+ if (const Arg *A = Args.getLastArg(OPT_ftime_report, OPT_ftime_report_EQ)) {
+ Opts.TimePasses = true;
+
+ // -ftime-report= is only supported by the new pass manager.
+ if (A->getOption().getID() == OPT_ftime_report_EQ) {
+ if (Opts.LegacyPassManager)
+ Diags.Report(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-fno-legacy-pass-manager";
+
+ StringRef Val = A->getValue();
+ if (Val == "per-pass")
+ Opts.TimePassesPerRun = false;
+ else if (Val == "per-pass-run")
+ Opts.TimePassesPerRun = true;
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ }
// Basic Block Sections implies Function Sections.
Opts.FunctionSections =
Args.hasArg(OPT_ffunction_sections) ||
(Opts.BBSections != "none" && Opts.BBSections != "labels");
- Opts.DataSections = Args.hasArg(OPT_fdata_sections);
- Opts.StackSizeSection = Args.hasArg(OPT_fstack_size_section);
- Opts.UniqueSectionNames = !Args.hasArg(OPT_fno_unique_section_names);
- Opts.UniqueBasicBlockSectionNames =
- Args.hasArg(OPT_funique_basic_block_section_names);
- Opts.UniqueInternalLinkageNames =
- Args.hasArg(OPT_funique_internal_linkage_names);
-
- Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions);
-
- Opts.NoUseJumpTables = Args.hasArg(OPT_fno_jump_tables);
-
- Opts.NullPointerIsValid = Args.hasArg(OPT_fno_delete_null_pointer_checks);
-
- Opts.ProfileSampleAccurate = Args.hasArg(OPT_fprofile_sample_accurate);
-
Opts.PrepareForLTO = Args.hasArg(OPT_flto, OPT_flto_EQ);
Opts.PrepareForThinLTO = false;
if (Arg *A = Args.getLastArg(OPT_flto_EQ)) {
@@ -1005,8 +1085,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
else if (S != "full")
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << S;
}
- Opts.LTOUnit = Args.hasFlag(OPT_flto_unit, OPT_fno_lto_unit, false);
- Opts.EnableSplitLTOUnit = Args.hasArg(OPT_fsplit_lto_unit);
if (Arg *A = Args.getLastArg(OPT_fthinlto_index_EQ)) {
if (IK.getLanguage() != Language::LLVM_IR)
Diags.Report(diag::err_drv_argument_only_allowed_with)
@@ -1017,38 +1095,20 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (Arg *A = Args.getLastArg(OPT_save_temps_EQ))
Opts.SaveTempsFilePrefix =
llvm::StringSwitch<std::string>(A->getValue())
- .Case("obj", FrontendOpts.OutputFile)
- .Default(llvm::sys::path::filename(FrontendOpts.OutputFile).str());
-
- Opts.ThinLinkBitcodeFile =
- std::string(Args.getLastArgValue(OPT_fthin_link_bitcode_EQ));
-
- Opts.MSVolatile = Args.hasArg(OPT_fms_volatile);
-
- Opts.VectorizeLoop = Args.hasArg(OPT_vectorize_loops);
- Opts.VectorizeSLP = Args.hasArg(OPT_vectorize_slp);
+ .Case("obj", OutputFile)
+ .Default(llvm::sys::path::filename(OutputFile).str());
+
+ // The memory profile runtime appends the pid to make this name more unique.
+ const char *MemProfileBasename = "memprof.profraw";
+ if (Args.hasArg(OPT_fmemory_profile_EQ)) {
+ SmallString<128> Path(
+ std::string(Args.getLastArgValue(OPT_fmemory_profile_EQ)));
+ llvm::sys::path::append(Path, MemProfileBasename);
+ Opts.MemoryProfileOutput = std::string(Path);
+ } else if (Args.hasArg(OPT_fmemory_profile))
+ Opts.MemoryProfileOutput = MemProfileBasename;
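// Example of the path handling above, for a hypothetical invocation:
// "-fmemory-profile=/tmp/prof" yields MemoryProfileOutput ==
// "/tmp/prof/memprof.profraw", while plain "-fmemory-profile" yields just
// "memprof.profraw"; the runtime appends the pid in both cases.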
- Opts.PreferVectorWidth =
- std::string(Args.getLastArgValue(OPT_mprefer_vector_width_EQ));
-
- Opts.MainFileName = std::string(Args.getLastArgValue(OPT_main_file_name));
- Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
-
- Opts.ControlFlowGuardNoChecks = Args.hasArg(OPT_cfguard_no_checks);
- Opts.ControlFlowGuard = Args.hasArg(OPT_cfguard);
-
- Opts.DisableGCov = Args.hasArg(OPT_test_coverage);
- Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
- Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
- Opts.CoverageDataFile =
- std::string(Args.getLastArgValue(OPT_coverage_data_file));
- Opts.CoverageNotesFile =
- std::string(Args.getLastArgValue(OPT_coverage_notes_file));
- Opts.ProfileFilterFiles =
- std::string(Args.getLastArgValue(OPT_fprofile_filter_files_EQ));
- Opts.ProfileExcludeFiles =
- std::string(Args.getLastArgValue(OPT_fprofile_exclude_files_EQ));
if (Args.hasArg(OPT_coverage_version_EQ)) {
StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ);
if (CoverageVersion.size() != 4) {
@@ -1060,63 +1120,27 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
}
- // Handle -fembed-bitcode option.
- if (Arg *A = Args.getLastArg(OPT_fembed_bitcode_EQ)) {
- StringRef Name = A->getValue();
- unsigned Model = llvm::StringSwitch<unsigned>(Name)
- .Case("off", CodeGenOptions::Embed_Off)
- .Case("all", CodeGenOptions::Embed_All)
- .Case("bitcode", CodeGenOptions::Embed_Bitcode)
- .Case("marker", CodeGenOptions::Embed_Marker)
- .Default(~0U);
- if (Model == ~0U) {
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- Success = false;
- } else
- Opts.setEmbedBitcode(
- static_cast<CodeGenOptions::EmbedBitcodeKind>(Model));
- }
// FIXME: For backend options that are not yet recorded as function
// attributes in the IR, keep track of them so we can embed them in a
// separate data section and use them when building the bitcode.
- if (Opts.getEmbedBitcode() == CodeGenOptions::Embed_All) {
- for (const auto &A : Args) {
- // Do not encode output and input.
- if (A->getOption().getID() == options::OPT_o ||
- A->getOption().getID() == options::OPT_INPUT ||
- A->getOption().getID() == options::OPT_x ||
- A->getOption().getID() == options::OPT_fembed_bitcode ||
- A->getOption().matches(options::OPT_W_Group))
- continue;
- ArgStringList ASL;
- A->render(Args, ASL);
- for (const auto &arg : ASL) {
- StringRef ArgStr(arg);
- Opts.CmdArgs.insert(Opts.CmdArgs.end(), ArgStr.begin(), ArgStr.end());
- // using \00 to separate each commandline options.
- Opts.CmdArgs.push_back('\0');
- }
+ for (const auto &A : Args) {
+ // Do not encode output and input.
+ if (A->getOption().getID() == options::OPT_o ||
+ A->getOption().getID() == options::OPT_INPUT ||
+ A->getOption().getID() == options::OPT_x ||
+ A->getOption().getID() == options::OPT_fembed_bitcode ||
+ A->getOption().matches(options::OPT_W_Group))
+ continue;
+ ArgStringList ASL;
+ A->render(Args, ASL);
+ for (const auto &arg : ASL) {
+ StringRef ArgStr(arg);
+ Opts.CmdArgs.insert(Opts.CmdArgs.end(), ArgStr.begin(), ArgStr.end());
+ // Use '\0' to separate each command-line option.
+ Opts.CmdArgs.push_back('\0');
}
}
- Opts.PreserveVec3Type = Args.hasArg(OPT_fpreserve_vec3_type);
- Opts.InstrumentFunctions = Args.hasArg(OPT_finstrument_functions);
- Opts.InstrumentFunctionsAfterInlining =
- Args.hasArg(OPT_finstrument_functions_after_inlining);
- Opts.InstrumentFunctionEntryBare =
- Args.hasArg(OPT_finstrument_function_entry_bare);
-
- Opts.XRayInstrumentFunctions =
- Args.hasArg(OPT_fxray_instrument);
- Opts.XRayAlwaysEmitCustomEvents =
- Args.hasArg(OPT_fxray_always_emit_customevents);
- Opts.XRayAlwaysEmitTypedEvents =
- Args.hasArg(OPT_fxray_always_emit_typedevents);
- Opts.XRayInstructionThreshold =
- getLastArgIntValue(Args, OPT_fxray_instruction_threshold_EQ, 200, Diags);
- Opts.XRayIgnoreLoops = Args.hasArg(OPT_fxray_ignore_loops);
- Opts.XRayOmitFunctionIndex = Args.hasArg(OPT_fno_xray_function_index);
-
auto XRayInstrBundles =
Args.getAllArgValues(OPT_fxray_instrumentation_bundle);
if (XRayInstrBundles.empty())
@@ -1126,17 +1150,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
parseXRayInstrumentationBundle("-fxray-instrumentation-bundle=", A, Args,
Diags, Opts.XRayInstrumentationBundle);
- Opts.PatchableFunctionEntryCount =
- getLastArgIntValue(Args, OPT_fpatchable_function_entry_EQ, 0, Diags);
- Opts.PatchableFunctionEntryOffset = getLastArgIntValue(
- Args, OPT_fpatchable_function_entry_offset_EQ, 0, Diags);
- Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
- Opts.CallFEntry = Args.hasArg(OPT_mfentry);
- Opts.MNopMCount = Args.hasArg(OPT_mnop_mcount);
- Opts.RecordMCount = Args.hasArg(OPT_mrecord_mcount);
- Opts.PackedStack = Args.hasArg(OPT_mpacked_stack);
- Opts.EmitOpenCLArgMetadata = Args.hasArg(OPT_cl_kernel_arg_info);
-
if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
StringRef Name = A->getValue();
if (Name == "full") {
@@ -1152,24 +1165,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
- if (const Arg *A = Args.getLastArg(OPT_compress_debug_sections,
- OPT_compress_debug_sections_EQ)) {
- if (A->getOption().getID() == OPT_compress_debug_sections) {
- // TODO: be more clever about the compression type auto-detection
- Opts.setCompressDebugSections(llvm::DebugCompressionType::GNU);
- } else {
- auto DCT = llvm::StringSwitch<llvm::DebugCompressionType>(A->getValue())
- .Case("none", llvm::DebugCompressionType::None)
- .Case("zlib", llvm::DebugCompressionType::Z)
- .Case("zlib-gnu", llvm::DebugCompressionType::GNU)
- .Default(llvm::DebugCompressionType::None);
- Opts.setCompressDebugSections(DCT);
- }
- }
-
- Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
- Opts.DebugCompilationDir =
- std::string(Args.getLastArgValue(OPT_fdebug_compilation_dir));
for (auto *A :
Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_builtin_bitcode)) {
CodeGenOptions::BitcodeFileToLink F;
@@ -1183,129 +1178,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.LinkBitcodeFiles.push_back(F);
}
- Opts.SanitizeCoverageType =
- getLastArgIntValue(Args, OPT_fsanitize_coverage_type, 0, Diags);
- Opts.SanitizeCoverageIndirectCalls =
- Args.hasArg(OPT_fsanitize_coverage_indirect_calls);
- Opts.SanitizeCoverageTraceBB = Args.hasArg(OPT_fsanitize_coverage_trace_bb);
- Opts.SanitizeCoverageTraceCmp = Args.hasArg(OPT_fsanitize_coverage_trace_cmp);
- Opts.SanitizeCoverageTraceDiv = Args.hasArg(OPT_fsanitize_coverage_trace_div);
- Opts.SanitizeCoverageTraceGep = Args.hasArg(OPT_fsanitize_coverage_trace_gep);
- Opts.SanitizeCoverage8bitCounters =
- Args.hasArg(OPT_fsanitize_coverage_8bit_counters);
- Opts.SanitizeCoverageTracePC = Args.hasArg(OPT_fsanitize_coverage_trace_pc);
- Opts.SanitizeCoverageTracePCGuard =
- Args.hasArg(OPT_fsanitize_coverage_trace_pc_guard);
- Opts.SanitizeCoverageNoPrune = Args.hasArg(OPT_fsanitize_coverage_no_prune);
- Opts.SanitizeCoverageInline8bitCounters =
- Args.hasArg(OPT_fsanitize_coverage_inline_8bit_counters);
- Opts.SanitizeCoverageInlineBoolFlag =
- Args.hasArg(OPT_fsanitize_coverage_inline_bool_flag);
- Opts.SanitizeCoveragePCTable = Args.hasArg(OPT_fsanitize_coverage_pc_table);
- Opts.SanitizeCoverageStackDepth =
- Args.hasArg(OPT_fsanitize_coverage_stack_depth);
- Opts.SanitizeCoverageAllowlistFiles =
- Args.getAllArgValues(OPT_fsanitize_coverage_allowlist);
- Opts.SanitizeCoverageBlocklistFiles =
- Args.getAllArgValues(OPT_fsanitize_coverage_blocklist);
- Opts.SanitizeMemoryTrackOrigins =
- getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
- Opts.SanitizeMemoryUseAfterDtor =
- Args.hasFlag(OPT_fsanitize_memory_use_after_dtor,
- OPT_fno_sanitize_memory_use_after_dtor,
- false);
- Opts.SanitizeMinimalRuntime = Args.hasArg(OPT_fsanitize_minimal_runtime);
- Opts.SanitizeCfiCrossDso = Args.hasArg(OPT_fsanitize_cfi_cross_dso);
- Opts.SanitizeCfiICallGeneralizePointers =
- Args.hasArg(OPT_fsanitize_cfi_icall_generalize_pointers);
- Opts.SanitizeCfiCanonicalJumpTables =
- Args.hasArg(OPT_fsanitize_cfi_canonical_jump_tables);
- Opts.SanitizeStats = Args.hasArg(OPT_fsanitize_stats);
- if (Arg *A = Args.getLastArg(
- OPT_fsanitize_address_poison_custom_array_cookie,
- OPT_fno_sanitize_address_poison_custom_array_cookie)) {
- Opts.SanitizeAddressPoisonCustomArrayCookie =
- A->getOption().getID() ==
- OPT_fsanitize_address_poison_custom_array_cookie;
- }
- if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_after_scope,
- OPT_fno_sanitize_address_use_after_scope)) {
- Opts.SanitizeAddressUseAfterScope =
- A->getOption().getID() == OPT_fsanitize_address_use_after_scope;
- }
- Opts.SanitizeAddressGlobalsDeadStripping =
- Args.hasArg(OPT_fsanitize_address_globals_dead_stripping);
- if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_odr_indicator,
- OPT_fno_sanitize_address_use_odr_indicator)) {
- Opts.SanitizeAddressUseOdrIndicator =
- A->getOption().getID() == OPT_fsanitize_address_use_odr_indicator;
- }
- Opts.SSPBufferSize =
- getLastArgIntValue(Args, OPT_stack_protector_buffer_size, 8, Diags);
- Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
- if (Arg *A = Args.getLastArg(OPT_mstack_alignment)) {
- StringRef Val = A->getValue();
- unsigned StackAlignment = Opts.StackAlignment;
- Val.getAsInteger(10, StackAlignment);
- Opts.StackAlignment = StackAlignment;
- }
-
- if (Arg *A = Args.getLastArg(OPT_mstack_probe_size)) {
- StringRef Val = A->getValue();
- unsigned StackProbeSize = Opts.StackProbeSize;
- Val.getAsInteger(0, StackProbeSize);
- Opts.StackProbeSize = StackProbeSize;
- }
-
- Opts.NoStackArgProbe = Args.hasArg(OPT_mno_stack_arg_probe);
-
- Opts.StackClashProtector = Args.hasArg(OPT_fstack_clash_protection);
-
- if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
- StringRef Name = A->getValue();
- unsigned Method = llvm::StringSwitch<unsigned>(Name)
- .Case("legacy", CodeGenOptions::Legacy)
- .Case("non-legacy", CodeGenOptions::NonLegacy)
- .Case("mixed", CodeGenOptions::Mixed)
- .Default(~0U);
- if (Method == ~0U) {
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- Success = false;
- } else {
- Opts.setObjCDispatchMethod(
- static_cast<CodeGenOptions::ObjCDispatchMethodKind>(Method));
- }
- }
-
-
- if (Args.hasArg(OPT_fno_objc_convert_messages_to_runtime_calls))
- Opts.ObjCConvertMessagesToRuntimeCalls = 0;
if (Args.getLastArg(OPT_femulated_tls) ||
Args.getLastArg(OPT_fno_emulated_tls)) {
Opts.ExplicitEmulatedTLS = true;
- Opts.EmulatedTLS =
- Args.hasFlag(OPT_femulated_tls, OPT_fno_emulated_tls, false);
- }
-
- if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
- StringRef Name = A->getValue();
- unsigned Model = llvm::StringSwitch<unsigned>(Name)
- .Case("global-dynamic", CodeGenOptions::GeneralDynamicTLSModel)
- .Case("local-dynamic", CodeGenOptions::LocalDynamicTLSModel)
- .Case("initial-exec", CodeGenOptions::InitialExecTLSModel)
- .Case("local-exec", CodeGenOptions::LocalExecTLSModel)
- .Default(~0U);
- if (Model == ~0U) {
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- Success = false;
- } else {
- Opts.setDefaultTLSModel(static_cast<CodeGenOptions::TLSModel>(Model));
- }
}
- Opts.TLSSize = getLastArgIntValue(Args, OPT_mtls_size_EQ, 0, Diags);
-
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
@@ -1342,11 +1220,28 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
- Opts.DependentLibraries = Args.getAllArgValues(OPT_dependent_lib);
- Opts.LinkerOptions = Args.getAllArgValues(OPT_linker_option);
+ if (T.isOSAIX() && (Args.hasArg(OPT_mignore_xcoff_visibility) ||
+ !Args.hasArg(OPT_fvisibility)))
+ Opts.IgnoreXCOFFVisibility = 1;
+
+ if (Arg *A =
+ Args.getLastArg(OPT_mabi_EQ_vec_default, OPT_mabi_EQ_vec_extabi)) {
+ if (!T.isOSAIX())
+ Diags.Report(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << T.str();
+
+ const Option &O = A->getOption();
+ if (O.matches(OPT_mabi_EQ_vec_default))
+ Diags.Report(diag::err_aix_default_altivec_abi)
+ << A->getSpelling() << T.str();
+ else {
+ assert(O.matches(OPT_mabi_EQ_vec_extabi));
+ Opts.EnableAIXExtendedAltivecABI = 1;
+ }
+ }
+
bool NeedLocTracking = false;
- Opts.OptRecordFile = std::string(Args.getLastArgValue(OPT_opt_record_file));
if (!Opts.OptRecordFile.empty())
NeedLocTracking = true;
@@ -1378,8 +1273,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
NeedLocTracking = true;
}
- Opts.DiagnosticsWithHotness =
- Args.hasArg(options::OPT_fdiagnostics_show_hotness);
bool UsingSampleProfile = !Opts.SampleProfileFile.empty();
bool UsingProfile = UsingSampleProfile ||
(Opts.getProfileUse() != CodeGenOptions::ProfileNone);
@@ -1390,11 +1283,24 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo)
<< "-fdiagnostics-show-hotness";
- Opts.DiagnosticsHotnessThreshold = getLastArgUInt64Value(
- Args, options::OPT_fdiagnostics_hotness_threshold_EQ, 0);
- if (Opts.DiagnosticsHotnessThreshold > 0 && !UsingProfile)
- Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo)
- << "-fdiagnostics-hotness-threshold=";
+ // Parse the remarks hotness threshold. Valid values are an integer or 'auto'.
+ if (auto *arg =
+ Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
+ auto ResultOrErr =
+ llvm::remarks::parseHotnessThresholdOption(arg->getValue());
+
+ if (!ResultOrErr) {
+ Diags.Report(diag::err_drv_invalid_diagnotics_hotness_threshold)
+ << "-fdiagnostics-hotness-threshold=";
+ } else {
+ Opts.DiagnosticsHotnessThreshold = *ResultOrErr;
+ if ((!Opts.DiagnosticsHotnessThreshold.hasValue() ||
+ Opts.DiagnosticsHotnessThreshold.getValue() > 0) &&
+ !UsingProfile)
+ Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo)
+ << "-fdiagnostics-hotness-threshold=";
+ }
+ }
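// Behavior sketch for the parsing above: passing 100 to
// -fdiagnostics-hotness-threshold= stores 100, while passing "auto" stores an
// empty Optional, i.e. the threshold is derived from the profile summary at
// remark-emission time; both outcomes land in DiagnosticsHotnessThreshold.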
// If the user requested to use a sample profile for PGO, then the
// backend will need to track source location information so the profile
@@ -1407,8 +1313,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (NeedLocTracking && Opts.getDebugInfo() == codegenoptions::NoDebugInfo)
Opts.setDebugInfo(codegenoptions::LocTrackingOnly);
- Opts.RewriteMapFiles = Args.getAllArgValues(OPT_frewrite_map_file);
-
// Parse -fsanitize-recover= arguments.
// FIXME: Report unrecoverable sanitizers incorrectly specified here.
parseSanitizerKinds("-fsanitize-recover=",
@@ -1418,44 +1322,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.getAllArgValues(OPT_fsanitize_trap_EQ), Diags,
Opts.SanitizeTrap);
- Opts.CudaGpuBinaryFileName =
- std::string(Args.getLastArgValue(OPT_fcuda_include_gpubinary));
-
- Opts.Backchain = Args.hasArg(OPT_mbackchain);
-
- Opts.EmitCheckPathComponentsToStrip = getLastArgIntValue(
- Args, OPT_fsanitize_undefined_strip_path_components_EQ, 0, Diags);
-
Opts.EmitVersionIdentMetadata = Args.hasFlag(OPT_Qy, OPT_Qn, true);
- Opts.Addrsig = Args.hasArg(OPT_faddrsig);
-
- Opts.KeepStaticConsts = Args.hasArg(OPT_fkeep_static_consts);
-
- Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
-
- Opts.DefaultFunctionAttrs = Args.getAllArgValues(OPT_default_function_attr);
-
- Opts.PassPlugins = Args.getAllArgValues(OPT_fpass_plugin_EQ);
-
- Opts.SymbolPartition =
- std::string(Args.getLastArgValue(OPT_fsymbol_partition_EQ));
-
- Opts.ForceAAPCSBitfieldLoad = Args.hasArg(OPT_ForceAAPCSBitfieldLoad);
return Success;
}
static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
ArgList &Args) {
- Opts.OutputFile = std::string(Args.getLastArgValue(OPT_dependency_file));
- Opts.Targets = Args.getAllArgValues(OPT_MT);
- Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
- Opts.IncludeModuleFiles = Args.hasArg(OPT_module_file_deps);
- Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
- Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
- Opts.HeaderIncludeOutputFile =
- std::string(Args.getLastArgValue(OPT_header_include_file));
- Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
if (Args.hasArg(OPT_show_includes)) {
// Writing both /showIncludes and preprocessor output to stdout
// would produce interleaved output, so use stderr for /showIncludes.
@@ -1467,11 +1340,6 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
} else {
Opts.ShowIncludesDest = ShowIncludesDestination::None;
}
- Opts.DOTOutputFile = std::string(Args.getLastArgValue(OPT_dependency_dot));
- Opts.ModuleDependencyOutputDir =
- std::string(Args.getLastArgValue(OPT_module_dependency_dir));
- if (Args.hasArg(OPT_MV))
- Opts.OutputFormat = DependencyOutputFormat::NMake;
// Add sanitizer blacklists as extra dependencies.
// They won't be discovered by the regular preprocessor, so
// we let make / ninja know about this implicit dependency.
@@ -1490,6 +1358,10 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
}
}
+ // -fprofile-list= dependencies.
+ for (const auto &Filename : Args.getAllArgValues(OPT_fprofile_list_EQ))
+ Opts.ExtraDeps.push_back(Filename);
+
// Propagate the extra dependencies.
for (const auto *A : Args.filtered(OPT_fdepfile_entry)) {
Opts.ExtraDeps.push_back(A->getValue());
@@ -1537,7 +1409,7 @@ static bool parseShowColorsArgs(const ArgList &Args, bool DefaultColor) {
}
static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
- DiagnosticsEngine *Diags) {
+ DiagnosticsEngine &Diags) {
bool Success = true;
for (const auto &Prefix : VerifyPrefixes) {
// Every prefix must start with a letter and contain only alphanumeric
@@ -1547,103 +1419,75 @@ static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
});
if (BadChar != Prefix.end() || !isLetter(Prefix[0])) {
Success = false;
- if (Diags) {
- Diags->Report(diag::err_drv_invalid_value) << "-verify=" << Prefix;
- Diags->Report(diag::note_drv_verify_prefix_spelling);
- }
+ Diags.Report(diag::err_drv_invalid_value) << "-verify=" << Prefix;
+ Diags.Report(diag::note_drv_verify_prefix_spelling);
}
}
return Success;
}
+bool CompilerInvocation::parseSimpleArgs(const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ bool Success = true;
+
+#define OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, Success, ID, FLAGS, PARAM, \
+ SHOULD_PARSE, this->KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
+ MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef OPTION_WITH_MARSHALLING
+
+ return Success;
+}
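// Usage sketch (hypothetical caller): once an option is described in
// Options.td with a key path, this single call subsumes the hand-written
// per-option parsing removed throughout this file:
//
//   CompilerInvocation Invocation;
//   bool Ok = Invocation.parseSimpleArgs(ParsedArgs, Diags);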
+
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticsEngine *Diags,
bool DefaultDiagColor) {
+ Optional<DiagnosticsEngine> IgnoringDiags;
+ if (!Diags) {
+ IgnoringDiags.emplace(new DiagnosticIDs(), new DiagnosticOptions(),
+ new IgnoringDiagConsumer());
+ Diags = &*IgnoringDiags;
+ }
+
+ // The key paths of diagnostic options defined in Options.td start with
+ // "DiagnosticOpts->". Let's provide the expected variable name and type.
+ DiagnosticOptions *DiagnosticOpts = &Opts;
bool Success = true;
- Opts.DiagnosticLogFile =
- std::string(Args.getLastArgValue(OPT_diagnostic_log_file));
+#define DIAG_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, *Diags, Success, ID, FLAGS, PARAM, \
+ SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
+ MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef DIAG_OPTION_WITH_MARSHALLING
+
+ llvm::sys::Process::UseANSIEscapeCodes(Opts.UseANSIEscapeCodes);
+
if (Arg *A =
Args.getLastArg(OPT_diagnostic_serialized_file, OPT__serialize_diags))
Opts.DiagnosticSerializationFile = A->getValue();
- Opts.IgnoreWarnings = Args.hasArg(OPT_w);
- Opts.NoRewriteMacros = Args.hasArg(OPT_Wno_rewrite_macros);
- Opts.Pedantic = Args.hasArg(OPT_pedantic);
- Opts.PedanticErrors = Args.hasArg(OPT_pedantic_errors);
- Opts.ShowCarets = !Args.hasArg(OPT_fno_caret_diagnostics);
Opts.ShowColors = parseShowColorsArgs(Args, DefaultDiagColor);
- Opts.ShowColumn = !Args.hasArg(OPT_fno_show_column);
- Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
- Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
- Opts.AbsolutePath = Args.hasArg(OPT_fdiagnostics_absolute_paths);
- Opts.ShowOptionNames = !Args.hasArg(OPT_fno_diagnostics_show_option);
-
- // Default behavior is to not to show note include stacks.
- Opts.ShowNoteIncludeStack = false;
- if (Arg *A = Args.getLastArg(OPT_fdiagnostics_show_note_include_stack,
- OPT_fno_diagnostics_show_note_include_stack))
- if (A->getOption().matches(OPT_fdiagnostics_show_note_include_stack))
- Opts.ShowNoteIncludeStack = true;
-
- StringRef ShowOverloads =
- Args.getLastArgValue(OPT_fshow_overloads_EQ, "all");
- if (ShowOverloads == "best")
- Opts.setShowOverloads(Ovl_Best);
- else if (ShowOverloads == "all")
- Opts.setShowOverloads(Ovl_All);
- else {
- Success = false;
- if (Diags)
- Diags->Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_fshow_overloads_EQ)->getAsString(Args)
- << ShowOverloads;
- }
-
- StringRef ShowCategory =
- Args.getLastArgValue(OPT_fdiagnostics_show_category, "none");
- if (ShowCategory == "none")
- Opts.ShowCategories = 0;
- else if (ShowCategory == "id")
- Opts.ShowCategories = 1;
- else if (ShowCategory == "name")
- Opts.ShowCategories = 2;
- else {
- Success = false;
- if (Diags)
- Diags->Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_fdiagnostics_show_category)->getAsString(Args)
- << ShowCategory;
- }
-
- StringRef Format =
- Args.getLastArgValue(OPT_fdiagnostics_format, "clang");
- if (Format == "clang")
- Opts.setFormat(DiagnosticOptions::Clang);
- else if (Format == "msvc")
- Opts.setFormat(DiagnosticOptions::MSVC);
- else if (Format == "msvc-fallback") {
- Opts.setFormat(DiagnosticOptions::MSVC);
+
+ if (Args.getLastArgValue(OPT_fdiagnostics_format) == "msvc-fallback")
Opts.CLFallbackMode = true;
- } else if (Format == "vi")
- Opts.setFormat(DiagnosticOptions::Vi);
- else {
- Success = false;
- if (Diags)
- Diags->Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_fdiagnostics_format)->getAsString(Args)
- << Format;
- }
- Opts.ShowSourceRanges = Args.hasArg(OPT_fdiagnostics_print_source_range_info);
- Opts.ShowParseableFixits = Args.hasArg(OPT_fdiagnostics_parseable_fixits);
- Opts.ShowPresumedLoc = !Args.hasArg(OPT_fno_diagnostics_use_presumed_location);
Opts.VerifyDiagnostics = Args.hasArg(OPT_verify) || Args.hasArg(OPT_verify_EQ);
- Opts.VerifyPrefixes = Args.getAllArgValues(OPT_verify_EQ);
if (Args.hasArg(OPT_verify))
Opts.VerifyPrefixes.push_back("expected");
// Keep VerifyPrefixes in its original order for the sake of diagnostics, and
// then sort it to prepare for fast lookup using std::binary_search.
- if (!checkVerifyPrefixes(Opts.VerifyPrefixes, Diags)) {
+ if (!checkVerifyPrefixes(Opts.VerifyPrefixes, *Diags)) {
Opts.VerifyDiagnostics = false;
Success = false;
}
@@ -1652,40 +1496,15 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticLevelMask DiagMask = DiagnosticLevelMask::None;
Success &= parseDiagnosticLevelMask("-verify-ignore-unexpected=",
Args.getAllArgValues(OPT_verify_ignore_unexpected_EQ),
- Diags, DiagMask);
+ *Diags, DiagMask);
if (Args.hasArg(OPT_verify_ignore_unexpected))
DiagMask = DiagnosticLevelMask::All;
Opts.setVerifyIgnoreUnexpected(DiagMask);
- Opts.ElideType = !Args.hasArg(OPT_fno_elide_type);
- Opts.ShowTemplateTree = Args.hasArg(OPT_fdiagnostics_show_template_tree);
- Opts.ErrorLimit = getLastArgIntValue(Args, OPT_ferror_limit, 0, Diags);
- Opts.MacroBacktraceLimit =
- getLastArgIntValue(Args, OPT_fmacro_backtrace_limit,
- DiagnosticOptions::DefaultMacroBacktraceLimit, Diags);
- Opts.TemplateBacktraceLimit = getLastArgIntValue(
- Args, OPT_ftemplate_backtrace_limit,
- DiagnosticOptions::DefaultTemplateBacktraceLimit, Diags);
- Opts.ConstexprBacktraceLimit = getLastArgIntValue(
- Args, OPT_fconstexpr_backtrace_limit,
- DiagnosticOptions::DefaultConstexprBacktraceLimit, Diags);
- Opts.SpellCheckingLimit = getLastArgIntValue(
- Args, OPT_fspell_checking_limit,
- DiagnosticOptions::DefaultSpellCheckingLimit, Diags);
- Opts.SnippetLineLimit = getLastArgIntValue(
- Args, OPT_fcaret_diagnostics_max_lines,
- DiagnosticOptions::DefaultSnippetLineLimit, Diags);
- Opts.TabStop = getLastArgIntValue(Args, OPT_ftabstop,
- DiagnosticOptions::DefaultTabStop, Diags);
if (Opts.TabStop == 0 || Opts.TabStop > DiagnosticOptions::MaxTabStop) {
Opts.TabStop = DiagnosticOptions::DefaultTabStop;
- if (Diags)
- Diags->Report(diag::warn_ignoring_ftabstop_value)
- << Opts.TabStop << DiagnosticOptions::DefaultTabStop;
+ Diags->Report(diag::warn_ignoring_ftabstop_value)
+ << Opts.TabStop << DiagnosticOptions::DefaultTabStop;
}
- Opts.MessageLength =
- getLastArgIntValue(Args, OPT_fmessage_length_EQ, 0, Diags);
-
- Opts.UndefPrefixes = Args.getAllArgValues(OPT_Wundef_prefix_EQ);
addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, Opts.Warnings);
addDiagnosticArgs(Args, OPT_R_Group, OPT_R_value_Group, Opts.Remarks);
@@ -1693,10 +1512,6 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
return Success;
}
-static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
- Opts.WorkingDir = std::string(Args.getLastArgValue(OPT_working_directory));
-}
-
/// Parse the argument to the -ftest-module-file-extension
/// command-line argument.
///
@@ -1854,7 +1669,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::PluginAction;
Opts.ActionName = A->getValue();
}
- Opts.AddPluginActions = Args.getAllArgValues(OPT_add_plugin);
for (const auto *AA : Args.filtered(OPT_plugin_arg))
Opts.PluginArgs[AA->getValue(0)].emplace_back(AA->getValue(1));
@@ -1885,128 +1699,25 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
}
- Opts.DisableFree = Args.hasArg(OPT_disable_free);
- Opts.OutputFile = std::string(Args.getLastArgValue(OPT_o));
Opts.Plugins = Args.getAllArgValues(OPT_load);
- Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch);
- Opts.ShowHelp = Args.hasArg(OPT_help);
- Opts.ShowStats = Args.hasArg(OPT_print_stats);
- Opts.ShowTimers = Args.hasArg(OPT_ftime_report);
- Opts.PrintSupportedCPUs = Args.hasArg(OPT_print_supported_cpus);
- Opts.TimeTrace = Args.hasArg(OPT_ftime_trace);
- Opts.TimeTraceGranularity = getLastArgIntValue(
- Args, OPT_ftime_trace_granularity_EQ, Opts.TimeTraceGranularity, Diags);
- Opts.ShowVersion = Args.hasArg(OPT_version);
- Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge);
- Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
- Opts.FixWhatYouCan = Args.hasArg(OPT_fix_what_you_can);
- Opts.FixOnlyWarnings = Args.hasArg(OPT_fix_only_warnings);
- Opts.FixAndRecompile = Args.hasArg(OPT_fixit_recompile);
- Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
Opts.ASTDumpDecls = Args.hasArg(OPT_ast_dump, OPT_ast_dump_EQ);
Opts.ASTDumpAll = Args.hasArg(OPT_ast_dump_all, OPT_ast_dump_all_EQ);
- Opts.ASTDumpFilter = std::string(Args.getLastArgValue(OPT_ast_dump_filter));
- Opts.ASTDumpLookups = Args.hasArg(OPT_ast_dump_lookups);
- Opts.ASTDumpDeclTypes = Args.hasArg(OPT_ast_dump_decl_types);
- Opts.UseGlobalModuleIndex = !Args.hasArg(OPT_fno_modules_global_index);
- Opts.GenerateGlobalModuleIndex = Opts.UseGlobalModuleIndex;
- Opts.ModuleMapFiles = Args.getAllArgValues(OPT_fmodule_map_file);
// Only the -fmodule-file=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
Opts.ModuleFiles.push_back(std::string(Val));
}
- Opts.ModulesEmbedFiles = Args.getAllArgValues(OPT_fmodules_embed_file_EQ);
- Opts.ModulesEmbedAllFiles = Args.hasArg(OPT_fmodules_embed_all_files);
- Opts.IncludeTimestamps = !Args.hasArg(OPT_fno_pch_timestamp);
- Opts.UseTemporary = !Args.hasArg(OPT_fno_temp_file);
- Opts.IsSystemModule = Args.hasArg(OPT_fsystem_module);
if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule)
Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module"
<< "-emit-module";
- Opts.CodeCompleteOpts.IncludeMacros
- = Args.hasArg(OPT_code_completion_macros);
- Opts.CodeCompleteOpts.IncludeCodePatterns
- = Args.hasArg(OPT_code_completion_patterns);
- Opts.CodeCompleteOpts.IncludeGlobals
- = !Args.hasArg(OPT_no_code_completion_globals);
- Opts.CodeCompleteOpts.IncludeNamespaceLevelDecls
- = !Args.hasArg(OPT_no_code_completion_ns_level_decls);
- Opts.CodeCompleteOpts.IncludeBriefComments
- = Args.hasArg(OPT_code_completion_brief_comments);
- Opts.CodeCompleteOpts.IncludeFixIts
- = Args.hasArg(OPT_code_completion_with_fixits);
-
- Opts.OverrideRecordLayoutsFile =
- std::string(Args.getLastArgValue(OPT_foverride_record_layout_EQ));
- Opts.AuxTriple = std::string(Args.getLastArgValue(OPT_aux_triple));
if (Args.hasArg(OPT_aux_target_cpu))
Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu));
if (Args.hasArg(OPT_aux_target_feature))
Opts.AuxTargetFeatures = Args.getAllArgValues(OPT_aux_target_feature);
- Opts.StatsFile = std::string(Args.getLastArgValue(OPT_stats_file));
-
- if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
- OPT_arcmt_modify,
- OPT_arcmt_migrate)) {
- switch (A->getOption().getID()) {
- default:
- llvm_unreachable("missed a case");
- case OPT_arcmt_check:
- Opts.ARCMTAction = FrontendOptions::ARCMT_Check;
- break;
- case OPT_arcmt_modify:
- Opts.ARCMTAction = FrontendOptions::ARCMT_Modify;
- break;
- case OPT_arcmt_migrate:
- Opts.ARCMTAction = FrontendOptions::ARCMT_Migrate;
- break;
- }
- }
- Opts.MTMigrateDir =
- std::string(Args.getLastArgValue(OPT_mt_migrate_directory));
- Opts.ARCMTMigrateReportOut =
- std::string(Args.getLastArgValue(OPT_arcmt_migrate_report_output));
- Opts.ARCMTMigrateEmitARCErrors
- = Args.hasArg(OPT_arcmt_migrate_emit_arc_errors);
-
- if (Args.hasArg(OPT_objcmt_migrate_literals))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Literals;
- if (Args.hasArg(OPT_objcmt_migrate_subscripting))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Subscripting;
- if (Args.hasArg(OPT_objcmt_migrate_property_dot_syntax))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_PropertyDotSyntax;
- if (Args.hasArg(OPT_objcmt_migrate_property))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Property;
- if (Args.hasArg(OPT_objcmt_migrate_readonly_property))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ReadonlyProperty;
- if (Args.hasArg(OPT_objcmt_migrate_readwrite_property))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ReadwriteProperty;
- if (Args.hasArg(OPT_objcmt_migrate_annotation))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Annotation;
- if (Args.hasArg(OPT_objcmt_returns_innerpointer_property))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ReturnsInnerPointerProperty;
- if (Args.hasArg(OPT_objcmt_migrate_instancetype))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Instancetype;
- if (Args.hasArg(OPT_objcmt_migrate_nsmacros))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_NsMacros;
- if (Args.hasArg(OPT_objcmt_migrate_protocol_conformance))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ProtocolConformance;
- if (Args.hasArg(OPT_objcmt_atomic_property))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_AtomicProperty;
- if (Args.hasArg(OPT_objcmt_ns_nonatomic_iosonly))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty;
- if (Args.hasArg(OPT_objcmt_migrate_designated_init))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_DesignatedInitializer;
- if (Args.hasArg(OPT_objcmt_migrate_all))
- Opts.ObjCMTAction |= FrontendOptions::ObjCMT_MigrateDecls;
-
- Opts.ObjCMTWhiteListPath =
- std::string(Args.getLastArgValue(OPT_objcmt_whitelist_dir_path));
if (Opts.ARCMTAction != FrontendOptions::ARCMT_None &&
Opts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
@@ -2022,8 +1733,9 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
// FIXME: Supporting '<lang>-header-cpp-output' would be useful.
bool Preprocessed = XValue.consume_back("-cpp-output");
bool ModuleMap = XValue.consume_back("-module-map");
- IsHeaderFile =
- !Preprocessed && !ModuleMap && XValue.consume_back("-header");
+ IsHeaderFile = !Preprocessed && !ModuleMap &&
+ XValue != "precompiled-header" &&
+ XValue.consume_back("-header");
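A hedged illustration (input assumed) of the corner case this guards against: without the explicit comparison, consume_back would strip "-header" from "precompiled-header" and misclassify the input as a header file.

    llvm::StringRef XValue = "precompiled-header";
    // consume_back("-header") succeeds and leaves "precompiled", so the
    // explicit XValue != "precompiled-header" check must short-circuit first.
    bool WouldMisfire = XValue.consume_back("-header"); // true without the guard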
// Principal languages.
DashX = llvm::StringSwitch<InputKind>(XValue)
@@ -2050,7 +1762,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DashX = llvm::StringSwitch<InputKind>(XValue)
.Case("cpp-output", InputKind(Language::C).getPreprocessed())
.Case("assembler-with-cpp", Language::Asm)
- .Cases("ast", "pcm",
+ .Cases("ast", "pcm", "precompiled-header",
InputKind(Language::Unknown, InputKind::Precompiled))
.Case("ir", Language::LLVM_IR)
.Default(Language::Unknown);
@@ -2107,14 +1819,8 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
const std::string &WorkingDir) {
- Opts.Sysroot = std::string(Args.getLastArgValue(OPT_isysroot, "/"));
- Opts.Verbose = Args.hasArg(OPT_v);
- Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
- Opts.UseStandardSystemIncludes = !Args.hasArg(OPT_nostdsysteminc);
- Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
Opts.UseLibcxx = (strcmp(A->getValue(), "libc++") == 0);
- Opts.ResourceDir = std::string(Args.getLastArgValue(OPT_resource_dir));
// Canonicalize -fmodules-cache-path before storing it.
SmallString<128> P(Args.getLastArgValue(OPT_fmodules_cache_path));
@@ -2127,8 +1833,6 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
llvm::sys::path::remove_dots(P);
Opts.ModuleCachePath = std::string(P.str());
- Opts.ModuleUserBuildPath =
- std::string(Args.getLastArgValue(OPT_fmodules_user_build_path));
// Only the -fmodule-file=<name>=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
@@ -2140,26 +1844,6 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
}
for (const auto *A : Args.filtered(OPT_fprebuilt_module_path))
Opts.AddPrebuiltModulePath(A->getValue());
- Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
- Opts.ModulesHashContent = Args.hasArg(OPT_fmodules_hash_content);
- Opts.ModulesValidateDiagnosticOptions =
- !Args.hasArg(OPT_fmodules_disable_diagnostic_validation);
- Opts.ImplicitModuleMaps = Args.hasArg(OPT_fimplicit_module_maps);
- Opts.ModuleMapFileHomeIsCwd = Args.hasArg(OPT_fmodule_map_file_home_is_cwd);
- Opts.ModuleCachePruneInterval =
- getLastArgIntValue(Args, OPT_fmodules_prune_interval, 7 * 24 * 60 * 60);
- Opts.ModuleCachePruneAfter =
- getLastArgIntValue(Args, OPT_fmodules_prune_after, 31 * 24 * 60 * 60);
- Opts.ModulesValidateOncePerBuildSession =
- Args.hasArg(OPT_fmodules_validate_once_per_build_session);
- Opts.BuildSessionTimestamp =
- getLastArgUInt64Value(Args, OPT_fbuild_session_timestamp, 0);
- Opts.ModulesValidateSystemHeaders =
- Args.hasArg(OPT_fmodules_validate_system_headers);
- Opts.ValidateASTInputFilesContent =
- Args.hasArg(OPT_fvalidate_ast_input_files_content);
- if (const Arg *A = Args.getLastArg(OPT_fmodule_format_EQ))
- Opts.ModuleFormat = A->getValue();
for (const auto *A : Args.filtered(OPT_fmodules_ignore_macro)) {
StringRef MacroDef = A->getValue();
@@ -2252,7 +1936,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
const llvm::Triple &T,
- PreprocessorOptions &PPOpts,
+ std::vector<std::string> &Includes,
LangStandard::Kind LangStd) {
// Set some properties which depend solely on the input kind; it would be nice
// to move these to the language standard, and have the driver resolve the
@@ -2316,6 +2000,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
}
const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
+ Opts.LangStd = LangStd;
Opts.LineComment = Std.hasLineComments();
Opts.C99 = Std.isC99();
Opts.C11 = Std.isC11();
@@ -2326,9 +2011,8 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.CPlusPlus14 = Std.isCPlusPlus14();
Opts.CPlusPlus17 = Std.isCPlusPlus17();
Opts.CPlusPlus20 = Std.isCPlusPlus20();
- Opts.Digraphs = Std.hasDigraphs();
+ Opts.CPlusPlus2b = Std.isCPlusPlus2b();
Opts.GNUMode = Std.isGNUMode();
- Opts.GNUInline = !Opts.C99 && !Opts.CPlusPlus;
Opts.GNUCVersion = 0;
Opts.HexFloats = Std.hasHexFloats();
Opts.ImplicitInt = Std.hasImplicitInt();
@@ -2343,6 +2027,8 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.OpenCLVersion = 120;
else if (LangStd == LangStandard::lang_opencl20)
Opts.OpenCLVersion = 200;
+ else if (LangStd == LangStandard::lang_opencl30)
+ Opts.OpenCLVersion = 300;
else if (LangStd == LangStandard::lang_openclcpp)
Opts.OpenCLCPlusPlusVersion = 100;
@@ -2350,34 +2036,38 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
if (Opts.OpenCL) {
Opts.AltiVec = 0;
Opts.ZVector = 0;
- Opts.setLaxVectorConversions(LangOptions::LaxVectorConversionKind::None);
Opts.setDefaultFPContractMode(LangOptions::FPM_On);
- Opts.NativeHalfType = 1;
- Opts.NativeHalfArgsAndReturns = 1;
Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
// Include default header file for OpenCL.
if (Opts.IncludeDefaultHeader) {
if (Opts.DeclareOpenCLBuiltins) {
// Only include base header file for builtin types and constants.
- PPOpts.Includes.push_back("opencl-c-base.h");
+ Includes.push_back("opencl-c-base.h");
} else {
- PPOpts.Includes.push_back("opencl-c.h");
+ Includes.push_back("opencl-c.h");
}
}
}
Opts.HIP = IK.getLanguage() == Language::HIP;
Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
- if (Opts.CUDA)
- // Set default FP_CONTRACT to FAST.
+ if (Opts.HIP) {
+ // The HIP toolchain does not support 'Fast' FPOpFusion in backends, since it
+ // fuses multiplication/addition instructions without the contract flag from
+ // device library functions in LLVM bitcode, which causes accuracy loss in
+ // certain math functions, e.g. tan(-1e20) becomes -0.933 instead of 0.8446.
+ // For device library functions in bitcode to work, the 'Strict' or 'Standard'
+ // FPOpFusion options are needed in backends. Therefore the 'fast-honor-pragmas'
+ // FP contract option is used to allow fusing across statements in the frontend
+ // while respecting the contract flag in the backend.
+ Opts.setDefaultFPContractMode(LangOptions::FPM_FastHonorPragmas);
+ } else if (Opts.CUDA) {
+ // Allow fusing across statements, disregarding pragmas.
Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
+ }
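To see why the contract mode matters, here is a small self-contained demo (illustrative only, not part of the commit): fusing a multiply-add into an fma skips the intermediate rounding of the product, which is exactly the behavior the HIP device libraries must be able to opt out of via the contract flag.

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = 1e16, b = 1e16, c = -1e32;
      double p = a * b;                 // rounded once; fusing across these two
      double unfused = p + c;           // statements needs a 'fast' contract mode
      double fused = std::fma(a, b, c); // single rounding keeps the residual
      std::printf("unfused=%g fused=%g\n", unfused, fused); // 0 vs. nonzero
    }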
Opts.RenderScript = IK.getLanguage() == Language::RenderScript;
- if (Opts.RenderScript) {
- Opts.NativeHalfType = 1;
- Opts.NativeHalfArgsAndReturns = 1;
- }
// OpenCL and C++ both have bool, true, false keywords.
Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
@@ -2388,7 +2078,6 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
// C++ has wchar_t keyword.
Opts.WChar = Opts.CPlusPlus;
- Opts.GNUKeywords = Opts.GNUMode;
Opts.CXXOperatorNames = Opts.CPlusPlus;
Opts.AlignedAllocation = Opts.CPlusPlus17;
@@ -2399,24 +2088,6 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.DoubleSquareBracketAttributes = Opts.CPlusPlus11 || Opts.C2x;
}
-/// Attempt to parse a visibility value out of the given argument.
-static Visibility parseVisibility(Arg *arg, ArgList &args,
- DiagnosticsEngine &diags) {
- StringRef value = arg->getValue();
- if (value == "default") {
- return DefaultVisibility;
- } else if (value == "hidden" || value == "internal") {
- return HiddenVisibility;
- } else if (value == "protected") {
- // FIXME: diagnose if target does not support protected visibility
- return ProtectedVisibility;
- }
-
- diags.Report(diag::err_drv_invalid_value)
- << arg->getAsString(args) << value;
- return DefaultVisibility;
-}
-
/// Check if input file kind and language standard are compatible.
static bool IsInputCompatibleWithStandard(InputKind IK,
const LangStandard &S) {
@@ -2486,10 +2157,19 @@ static const StringRef GetInputKindName(InputKind IK) {
llvm_unreachable("unknown input language");
}
-static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
- const TargetOptions &TargetOpts,
- PreprocessorOptions &PPOpts,
- DiagnosticsEngine &Diags) {
+static void GenerateLangArgs(const LangOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA) {
+ if (Opts.IncludeDefaultHeader)
+ Args.push_back(SA(GetOptName(OPT_finclude_default_header)));
+ if (Opts.DeclareOpenCLBuiltins)
+ Args.push_back(SA(GetOptName(OPT_fdeclare_opencl_builtins)));
+}
+
+void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
+ InputKind IK, const llvm::Triple &T,
+ std::vector<std::string> &Includes,
+ DiagnosticsEngine &Diags) {
// FIXME: Cleanup per-file based stuff.
LangStandard::Kind LangStd = LangStandard::lang_unspecified;
if (const Arg *A = Args.getLastArg(OPT_std_EQ)) {
@@ -2531,24 +2211,17 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
- if (Args.hasArg(OPT_fno_dllexport_inlines))
- Opts.DllExportInlines = false;
-
- if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
- StringRef Name = A->getValue();
- if (Name == "full" || Name == "branch") {
- Opts.CFProtectionBranch = 1;
- }
- }
// -cl-std only applies for OpenCL language standards.
// Override the -std option in this case.
if (const Arg *A = Args.getLastArg(OPT_cl_std_EQ)) {
LangStandard::Kind OpenCLLangStd
= llvm::StringSwitch<LangStandard::Kind>(A->getValue())
.Cases("cl", "CL", LangStandard::lang_opencl10)
+ .Cases("cl1.0", "CL1.0", LangStandard::lang_opencl10)
.Cases("cl1.1", "CL1.1", LangStandard::lang_opencl11)
.Cases("cl1.2", "CL1.2", LangStandard::lang_opencl12)
.Cases("cl2.0", "CL2.0", LangStandard::lang_opencl20)
+ .Cases("cl3.0", "CL3.0", LangStandard::lang_opencl30)
.Cases("clc++", "CLC++", LangStandard::lang_openclcpp)
.Default(LangStandard::lang_unspecified);
@@ -2560,81 +2233,39 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
LangStd = OpenCLLangStd;
}
- Opts.SYCL = Args.hasArg(options::OPT_fsycl);
- Opts.SYCLIsDevice = Opts.SYCL && Args.hasArg(options::OPT_fsycl_is_device);
- if (Opts.SYCL) {
- // -sycl-std applies to any SYCL source, not only those containing kernels,
- // but also those using the SYCL API
- if (const Arg *A = Args.getLastArg(OPT_sycl_std_EQ)) {
- Opts.SYCLVersion = llvm::StringSwitch<unsigned>(A->getValue())
- .Cases("2017", "1.2.1", "121", "sycl-1.2.1", 2017)
- .Default(0U);
-
- if (Opts.SYCLVersion == 0U) {
- // User has passed an invalid value to the flag, this is an error
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue();
- }
- }
- }
-
+ // These need to be parsed now. They are used to set OpenCL defaults.
Opts.IncludeDefaultHeader = Args.hasArg(OPT_finclude_default_header);
Opts.DeclareOpenCLBuiltins = Args.hasArg(OPT_fdeclare_opencl_builtins);
- llvm::Triple T(TargetOpts.Triple);
- CompilerInvocation::setLangDefaults(Opts, IK, T, PPOpts, LangStd);
+ CompilerInvocation::setLangDefaults(Opts, IK, T, Includes, LangStd);
- // -cl-strict-aliasing needs to emit diagnostic in the case where CL > 1.0.
- // This option should be deprecated for CL > 1.0 because
- // this option was added for compatibility with OpenCL 1.0.
- if (Args.getLastArg(OPT_cl_strict_aliasing)
- && Opts.OpenCLVersion > 100) {
- Diags.Report(diag::warn_option_invalid_ocl_version)
- << Opts.getOpenCLVersionTuple().getAsString()
- << Args.getLastArg(OPT_cl_strict_aliasing)->getAsString(Args);
- }
+ // The key paths of lang options defined in Options.td start with
+ // "LangOpts->". Let's provide the expected variable name and type.
+ LangOptions *LangOpts = &Opts;
+ bool Success = true;
- // We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
- // keywords. This behavior is provided by GCC's poorly named '-fasm' flag,
- // while a subset (the non-C++ GNU keywords) is provided by GCC's
- // '-fgnu-keywords'. Clang conflates the two for simplicity under the single
- // name, as it doesn't seem a useful distinction.
- Opts.GNUKeywords = Args.hasFlag(OPT_fgnu_keywords, OPT_fno_gnu_keywords,
- Opts.GNUKeywords);
+#define LANG_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, Success, ID, FLAGS, PARAM, \
+ SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
+ MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef LANG_OPTION_WITH_MARSHALLING
- Opts.Digraphs = Args.hasFlag(OPT_fdigraphs, OPT_fno_digraphs, Opts.Digraphs);
+ if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
+ StringRef Name = A->getValue();
+ if (Name == "full" || Name == "branch") {
+ Opts.CFProtectionBranch = 1;
+ }
+ }
if (Args.hasArg(OPT_fno_operator_names))
Opts.CXXOperatorNames = 0;
- if (Args.hasArg(OPT_fcuda_is_device))
- Opts.CUDAIsDevice = 1;
-
- if (Args.hasArg(OPT_fcuda_allow_variadic_functions))
- Opts.CUDAAllowVariadicFunctions = 1;
-
- if (Args.hasArg(OPT_fno_cuda_host_device_constexpr))
- Opts.CUDAHostDeviceConstexpr = 0;
-
- if (Opts.CUDAIsDevice && Args.hasArg(OPT_fcuda_approx_transcendentals))
- Opts.CUDADeviceApproxTranscendentals = 1;
-
- Opts.GPURelocatableDeviceCode = Args.hasArg(OPT_fgpu_rdc);
- if (Args.hasArg(OPT_fgpu_allow_device_init)) {
- if (Opts.HIP)
- Opts.GPUAllowDeviceInit = 1;
- else
- Diags.Report(diag::warn_ignored_hip_only_option)
- << Args.getLastArg(OPT_fgpu_allow_device_init)->getAsString(Args);
- }
- Opts.HIPUseNewLaunchAPI = Args.hasArg(OPT_fhip_new_launch_api);
- if (Opts.HIP)
- Opts.GPUMaxThreadsPerBlock = getLastArgIntValue(
- Args, OPT_gpu_max_threads_per_block_EQ, Opts.GPUMaxThreadsPerBlock);
- else if (Args.hasArg(OPT_gpu_max_threads_per_block_EQ))
- Diags.Report(diag::warn_ignored_hip_only_option)
- << Args.getLastArg(OPT_gpu_max_threads_per_block_EQ)->getAsString(Args);
-
if (Opts.ObjC) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
StringRef value = arg->getValue();
@@ -2677,9 +2308,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ObjCWeak = Opts.ObjCWeakRuntime;
}
- if (Args.hasArg(OPT_fno_objc_infer_related_result_type))
- Opts.ObjCInferRelatedResultType = 0;
-
if (Args.hasArg(OPT_fobjc_subscripting_legacy_runtime))
Opts.ObjCSubscriptingLegacyRuntime =
(Opts.ObjCRuntime.getKind() == ObjCRuntime::FragileMacOSX);
@@ -2700,66 +2328,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.GNUCVersion = Major * 100 * 100 + Minor * 100 + Patch;
}
- if (Args.hasArg(OPT_fgnu89_inline)) {
- if (Opts.CPlusPlus)
- Diags.Report(diag::err_drv_argument_not_allowed_with)
- << "-fgnu89-inline" << GetInputKindName(IK);
- else
- Opts.GNUInline = 1;
- }
-
- if (Args.hasArg(OPT_fapple_kext)) {
- if (!Opts.CPlusPlus)
- Diags.Report(diag::warn_c_kext);
- else
- Opts.AppleKext = 1;
- }
-
- if (Args.hasArg(OPT_print_ivar_layout))
- Opts.ObjCGCBitmapPrint = 1;
-
- if (Args.hasArg(OPT_fno_constant_cfstrings))
- Opts.NoConstantCFStrings = 1;
- if (const auto *A = Args.getLastArg(OPT_fcf_runtime_abi_EQ))
- Opts.CFRuntime =
- llvm::StringSwitch<LangOptions::CoreFoundationABI>(A->getValue())
- .Cases("unspecified", "standalone", "objc",
- LangOptions::CoreFoundationABI::ObjectiveC)
- .Cases("swift", "swift-5.0",
- LangOptions::CoreFoundationABI::Swift5_0)
- .Case("swift-4.2", LangOptions::CoreFoundationABI::Swift4_2)
- .Case("swift-4.1", LangOptions::CoreFoundationABI::Swift4_1)
- .Default(LangOptions::CoreFoundationABI::ObjectiveC);
-
- if (Args.hasArg(OPT_fzvector))
- Opts.ZVector = 1;
-
- if (Args.hasArg(OPT_pthread))
- Opts.POSIXThreads = 1;
-
- // The value-visibility mode defaults to "default".
- if (Arg *visOpt = Args.getLastArg(OPT_fvisibility)) {
- Opts.setValueVisibilityMode(parseVisibility(visOpt, Args, Diags));
- } else {
- Opts.setValueVisibilityMode(DefaultVisibility);
- }
-
- // The type-visibility mode defaults to the value-visibility mode.
- if (Arg *typeVisOpt = Args.getLastArg(OPT_ftype_visibility)) {
- Opts.setTypeVisibilityMode(parseVisibility(typeVisOpt, Args, Diags));
- } else {
- Opts.setTypeVisibilityMode(Opts.getValueVisibilityMode());
- }
-
- if (Args.hasArg(OPT_fvisibility_inlines_hidden))
- Opts.InlineVisibilityHidden = 1;
-
- if (Args.hasArg(OPT_fvisibility_global_new_delete_hidden))
- Opts.GlobalAllocationFunctionVisibilityHidden = 1;
-
- if (Args.hasArg(OPT_fapply_global_visibility_to_externs))
- Opts.SetVisibilityForExternDecls = 1;
-
if (Args.hasArg(OPT_ftrapv)) {
Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
// Set the handler, if one is specified.
@@ -2769,7 +2337,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
else if (Args.hasArg(OPT_fwrapv))
Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
- Opts.MSVCCompat = Args.hasArg(OPT_fms_compatibility);
Opts.MicrosoftExt = Opts.MSVCCompat || Args.hasArg(OPT_fms_extensions);
Opts.AsmBlocks = Args.hasArg(OPT_fasm_blocks) || Opts.MicrosoftExt;
Opts.MSCompatibilityVersion = 0;
@@ -2786,38 +2353,15 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
// is specified, or -std is set to a conforming mode.
// Trigraphs are disabled by default in c++1z onwards.
- Opts.Trigraphs = !Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17;
+ // For z/OS, trigraphs are enabled by default (without regard to the above).
+ Opts.Trigraphs =
+ (!Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17) || T.isOSzOS();
Opts.Trigraphs =
Args.hasFlag(OPT_ftrigraphs, OPT_fno_trigraphs, Opts.Trigraphs);
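As a quick reminder of what the flag controls, a tiny example (illustrative only): when trigraphs are in effect, the lexer rewrites three-character sequences before tokenization.

    /* Only meaningful when -ftrigraphs is in effect:      */
    /*   ??=  ->  #      ??(  ->  [      ??)  ->  ]        */
    ??=include <stdio.h>  /* lexed as: #include <stdio.h>  */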
Opts.DollarIdents = Args.hasFlag(OPT_fdollars_in_identifiers,
OPT_fno_dollars_in_identifiers,
Opts.DollarIdents);
- Opts.PascalStrings = Args.hasArg(OPT_fpascal_strings);
- Opts.setVtorDispMode(
- MSVtorDispMode(getLastArgIntValue(Args, OPT_vtordisp_mode_EQ, 1, Diags)));
- Opts.Borland = Args.hasArg(OPT_fborland_extensions);
- Opts.WritableStrings = Args.hasArg(OPT_fwritable_strings);
- Opts.ConstStrings = Args.hasFlag(OPT_fconst_strings, OPT_fno_const_strings,
- Opts.ConstStrings);
- if (Arg *A = Args.getLastArg(OPT_flax_vector_conversions_EQ)) {
- using LaxKind = LangOptions::LaxVectorConversionKind;
- if (auto Kind = llvm::StringSwitch<Optional<LaxKind>>(A->getValue())
- .Case("none", LaxKind::None)
- .Case("integer", LaxKind::Integer)
- .Case("all", LaxKind::All)
- .Default(llvm::None))
- Opts.setLaxVectorConversions(*Kind);
- else
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue();
- }
- if (Args.hasArg(OPT_fno_threadsafe_statics))
- Opts.ThreadsafeStatics = 0;
- Opts.Exceptions = Args.hasArg(OPT_fexceptions);
- Opts.IgnoreExceptions = Args.hasArg(OPT_fignore_exceptions);
- Opts.ObjCExceptions = Args.hasArg(OPT_fobjc_exceptions);
- Opts.CXXExceptions = Args.hasArg(OPT_fcxx_exceptions);
// -ffixed-point
Opts.FixedPoint =
@@ -2829,35 +2373,15 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
/*Default=*/false) &&
Opts.FixedPoint;
- // Handle exception personalities
- Arg *A = Args.getLastArg(
- options::OPT_fsjlj_exceptions, options::OPT_fseh_exceptions,
- options::OPT_fdwarf_exceptions, options::OPT_fwasm_exceptions);
- if (A) {
- const Option &Opt = A->getOption();
- llvm::Triple T(TargetOpts.Triple);
- if (T.isWindowsMSVCEnvironment())
- Diags.Report(diag::err_fe_invalid_exception_model)
- << Opt.getName() << T.str();
-
- Opts.SjLjExceptions = Opt.matches(options::OPT_fsjlj_exceptions);
- Opts.SEHExceptions = Opt.matches(options::OPT_fseh_exceptions);
- Opts.DWARFExceptions = Opt.matches(options::OPT_fdwarf_exceptions);
- Opts.WasmExceptions = Opt.matches(options::OPT_fwasm_exceptions);
- }
-
- Opts.ExternCNoUnwind = Args.hasArg(OPT_fexternc_nounwind);
- Opts.TraditionalCPP = Args.hasArg(OPT_traditional_cpp);
-
Opts.RTTI = Opts.CPlusPlus && !Args.hasArg(OPT_fno_rtti);
Opts.RTTIData = Opts.RTTI && !Args.hasArg(OPT_fno_rtti_data);
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion == 200);
- Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
Opts.Coroutines = Opts.CPlusPlus20 || Args.hasArg(OPT_fcoroutines_ts);
Opts.ConvergentFunctions = Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
- Args.hasArg(OPT_fconvergent_functions);
+ Opts.SYCLIsDevice ||
+ Args.hasArg(OPT_fconvergent_functions);
Opts.DoubleSquareBracketAttributes =
Args.hasFlag(OPT_fdouble_square_bracket_attributes,
@@ -2865,10 +2389,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.DoubleSquareBracketAttributes);
Opts.CPlusPlusModules = Opts.CPlusPlus20;
- Opts.ModulesTS = Args.hasArg(OPT_fmodules_ts);
Opts.Modules =
Args.hasArg(OPT_fmodules) || Opts.ModulesTS || Opts.CPlusPlusModules;
- Opts.ModulesStrictDeclUse = Args.hasArg(OPT_fmodules_strict_decluse);
Opts.ModulesDeclUse =
Args.hasArg(OPT_fmodules_decluse) || Opts.ModulesStrictDeclUse;
// FIXME: We only need this in C++ modules / Modules TS if we might textually
@@ -2876,126 +2398,37 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ModulesLocalVisibility =
Args.hasArg(OPT_fmodules_local_submodule_visibility) || Opts.ModulesTS ||
Opts.CPlusPlusModules;
- Opts.ModulesCodegen = Args.hasArg(OPT_fmodules_codegen);
- Opts.ModulesDebugInfo = Args.hasArg(OPT_fmodules_debuginfo);
Opts.ModulesSearchAll = Opts.Modules &&
!Args.hasArg(OPT_fno_modules_search_all) &&
Args.hasArg(OPT_fmodules_search_all);
- Opts.ModulesErrorRecovery = !Args.hasArg(OPT_fno_modules_error_recovery);
- Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules);
Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus20);
- if (const Arg *A = Args.getLastArg(OPT_fwchar_type_EQ)) {
- Opts.WCharSize = llvm::StringSwitch<unsigned>(A->getValue())
- .Case("char", 1)
- .Case("short", 2)
- .Case("int", 4)
- .Default(0);
- if (Opts.WCharSize == 0)
- Diags.Report(diag::err_fe_invalid_wchar_type) << A->getValue();
- }
- Opts.WCharIsSigned = Args.hasFlag(OPT_fsigned_wchar, OPT_fno_signed_wchar, true);
- Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
- Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
if (!Opts.NoBuiltin)
getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
- Opts.NoMathBuiltin = Args.hasArg(OPT_fno_math_builtin);
- Opts.RelaxedTemplateTemplateArgs =
- Args.hasArg(OPT_frelaxed_template_template_args);
- Opts.SizedDeallocation = Args.hasArg(OPT_fsized_deallocation);
Opts.AlignedAllocation =
Args.hasFlag(OPT_faligned_allocation, OPT_fno_aligned_allocation,
Opts.AlignedAllocation);
Opts.AlignedAllocationUnavailable =
Opts.AlignedAllocation && Args.hasArg(OPT_aligned_alloc_unavailable);
- Opts.NewAlignOverride =
- getLastArgIntValue(Args, OPT_fnew_alignment_EQ, 0, Diags);
- if (Opts.NewAlignOverride && !llvm::isPowerOf2_32(Opts.NewAlignOverride)) {
- Arg *A = Args.getLastArg(OPT_fnew_alignment_EQ);
- Diags.Report(diag::err_fe_invalid_alignment) << A->getAsString(Args)
- << A->getValue();
- Opts.NewAlignOverride = 0;
- }
- Opts.ConceptSatisfactionCaching =
- !Args.hasArg(OPT_fno_concept_satisfaction_caching);
if (Args.hasArg(OPT_fconcepts_ts))
Diags.Report(diag::warn_fe_concepts_ts_flag);
- // Recovery AST still heavily relies on dependent-type machinery.
- Opts.RecoveryAST =
- Args.hasFlag(OPT_frecovery_ast, OPT_fno_recovery_ast, Opts.CPlusPlus);
- Opts.RecoveryASTType =
- Args.hasFlag(OPT_frecovery_ast_type, OPT_fno_recovery_ast_type, false);
- Opts.HeinousExtensions = Args.hasArg(OPT_fheinous_gnu_extensions);
- Opts.AccessControl = !Args.hasArg(OPT_fno_access_control);
- Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
Opts.MathErrno = !Opts.OpenCL && Args.hasArg(OPT_fmath_errno);
- Opts.InstantiationDepth =
- getLastArgIntValue(Args, OPT_ftemplate_depth, 1024, Diags);
- Opts.ArrowDepth =
- getLastArgIntValue(Args, OPT_foperator_arrow_depth, 256, Diags);
- Opts.ConstexprCallDepth =
- getLastArgIntValue(Args, OPT_fconstexpr_depth, 512, Diags);
- Opts.ConstexprStepLimit =
- getLastArgIntValue(Args, OPT_fconstexpr_steps, 1048576, Diags);
- Opts.EnableNewConstInterp =
- Args.hasArg(OPT_fexperimental_new_constant_interpreter);
- Opts.BracketDepth = getLastArgIntValue(Args, OPT_fbracket_depth, 256, Diags);
- Opts.DelayedTemplateParsing = Args.hasArg(OPT_fdelayed_template_parsing);
- Opts.NumLargeByValueCopy =
- getLastArgIntValue(Args, OPT_Wlarge_by_value_copy_EQ, 0, Diags);
- Opts.MSBitfields = Args.hasArg(OPT_mms_bitfields);
- Opts.ObjCConstantStringClass =
- std::string(Args.getLastArgValue(OPT_fconstant_string_class));
- Opts.ObjCDefaultSynthProperties =
- !Args.hasArg(OPT_disable_objc_default_synthesize_properties);
- Opts.EncodeExtendedBlockSig =
- Args.hasArg(OPT_fencode_extended_block_signature);
- Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
- Opts.PackStruct = getLastArgIntValue(Args, OPT_fpack_struct_EQ, 0, Diags);
- Opts.MaxTypeAlign = getLastArgIntValue(Args, OPT_fmax_type_align_EQ, 0, Diags);
- Opts.AlignDouble = Args.hasArg(OPT_malign_double);
- Opts.DoubleSize = getLastArgIntValue(Args, OPT_mdouble_EQ, 0, Diags);
Opts.LongDoubleSize = Args.hasArg(OPT_mlong_double_128)
? 128
: Args.hasArg(OPT_mlong_double_64) ? 64 : 0;
- Opts.PPCIEEELongDouble = Args.hasArg(OPT_mabi_EQ_ieeelongdouble);
+ Opts.EnableAIXExtendedAltivecABI = Args.hasArg(OPT_mabi_EQ_vec_extabi);
Opts.PICLevel = getLastArgIntValue(Args, OPT_pic_level, 0, Diags);
- Opts.ROPI = Args.hasArg(OPT_fropi);
- Opts.RWPI = Args.hasArg(OPT_frwpi);
- Opts.PIE = Args.hasArg(OPT_pic_is_pie);
- Opts.Static = Args.hasArg(OPT_static_define);
- Opts.DumpRecordLayoutsSimple = Args.hasArg(OPT_fdump_record_layouts_simple);
Opts.DumpRecordLayouts = Opts.DumpRecordLayoutsSimple
|| Args.hasArg(OPT_fdump_record_layouts);
- Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts);
- Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking);
- Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
- Opts.SinglePrecisionConstants = Args.hasArg(OPT_cl_single_precision_constant);
- Opts.FastRelaxedMath = Args.hasArg(OPT_cl_fast_relaxed_math);
if (Opts.FastRelaxedMath)
Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
- Opts.HexagonQdsp6Compat = Args.hasArg(OPT_mqdsp6_compat);
- Opts.FakeAddressSpaceMap = Args.hasArg(OPT_ffake_address_space_map);
- Opts.ParseUnknownAnytype = Args.hasArg(OPT_funknown_anytype);
- Opts.DebuggerSupport = Args.hasArg(OPT_fdebugger_support);
- Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
- Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
- Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
- Opts.ModuleName = std::string(Args.getLastArgValue(OPT_fmodule_name_EQ));
- Opts.CurrentModule = Opts.ModuleName;
- Opts.AppExt = Args.hasArg(OPT_fapplication_extension);
- Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature);
+ Opts.XLPragmaPack = Args.hasArg(OPT_fxl_pragma_pack);
llvm::sort(Opts.ModuleFeatures);
- Opts.NativeHalfType |= Args.hasArg(OPT_fnative_half_type);
- Opts.NativeHalfArgsAndReturns |= Args.hasArg(OPT_fnative_half_arguments_and_returns);
- // Enable HalfArgsAndReturns if present in Args or if NativeHalfArgsAndReturns
- // is enabled.
- Opts.HalfArgsAndReturns = Args.hasArg(OPT_fallow_half_arguments_and_returns)
- | Opts.NativeHalfArgsAndReturns;
- Opts.GNUAsm = !Args.hasArg(OPT_fno_gnu_inline_asm);
- Opts.Cmse = Args.hasArg(OPT_mcmse); // Armv8-M Security Extensions
+
+ Opts.ArmSveVectorBits =
+ getLastArgIntValue(Args, options::OPT_msve_vector_bits_EQ, 0, Diags);
// __declspec is enabled by default for the PS4 by the driver, and also
// enabled for Microsoft Extensions or Borland Extensions, here.
@@ -3009,87 +2442,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Args.hasFlag(OPT_fdeclspec, OPT_fno_declspec,
(Opts.MicrosoftExt || Opts.Borland || Opts.CUDA));
- if (Arg *A = Args.getLastArg(OPT_faddress_space_map_mangling_EQ)) {
- switch (llvm::StringSwitch<unsigned>(A->getValue())
- .Case("target", LangOptions::ASMM_Target)
- .Case("no", LangOptions::ASMM_Off)
- .Case("yes", LangOptions::ASMM_On)
- .Default(255)) {
- default:
- Diags.Report(diag::err_drv_invalid_value)
- << "-faddress-space-map-mangling=" << A->getValue();
- break;
- case LangOptions::ASMM_Target:
- Opts.setAddressSpaceMapMangling(LangOptions::ASMM_Target);
- break;
- case LangOptions::ASMM_On:
- Opts.setAddressSpaceMapMangling(LangOptions::ASMM_On);
- break;
- case LangOptions::ASMM_Off:
- Opts.setAddressSpaceMapMangling(LangOptions::ASMM_Off);
- break;
- }
- }
-
- if (Arg *A = Args.getLastArg(OPT_fms_memptr_rep_EQ)) {
- LangOptions::PragmaMSPointersToMembersKind InheritanceModel =
- llvm::StringSwitch<LangOptions::PragmaMSPointersToMembersKind>(
- A->getValue())
- .Case("single",
- LangOptions::PPTMK_FullGeneralitySingleInheritance)
- .Case("multiple",
- LangOptions::PPTMK_FullGeneralityMultipleInheritance)
- .Case("virtual",
- LangOptions::PPTMK_FullGeneralityVirtualInheritance)
- .Default(LangOptions::PPTMK_BestCase);
- if (InheritanceModel == LangOptions::PPTMK_BestCase)
- Diags.Report(diag::err_drv_invalid_value)
- << "-fms-memptr-rep=" << A->getValue();
-
- Opts.setMSPointerToMemberRepresentationMethod(InheritanceModel);
- }
-
- // Check for MS default calling conventions being specified.
- if (Arg *A = Args.getLastArg(OPT_fdefault_calling_conv_EQ)) {
- LangOptions::DefaultCallingConvention DefaultCC =
- llvm::StringSwitch<LangOptions::DefaultCallingConvention>(A->getValue())
- .Case("cdecl", LangOptions::DCC_CDecl)
- .Case("fastcall", LangOptions::DCC_FastCall)
- .Case("stdcall", LangOptions::DCC_StdCall)
- .Case("vectorcall", LangOptions::DCC_VectorCall)
- .Case("regcall", LangOptions::DCC_RegCall)
- .Default(LangOptions::DCC_None);
- if (DefaultCC == LangOptions::DCC_None)
- Diags.Report(diag::err_drv_invalid_value)
- << "-fdefault-calling-conv=" << A->getValue();
-
- llvm::Triple T(TargetOpts.Triple);
- llvm::Triple::ArchType Arch = T.getArch();
- bool emitError = (DefaultCC == LangOptions::DCC_FastCall ||
- DefaultCC == LangOptions::DCC_StdCall) &&
- Arch != llvm::Triple::x86;
- emitError |= (DefaultCC == LangOptions::DCC_VectorCall ||
- DefaultCC == LangOptions::DCC_RegCall) &&
- !T.isX86();
- if (emitError)
- Diags.Report(diag::err_drv_argument_not_allowed_with)
- << A->getSpelling() << T.getTriple();
- else
- Opts.setDefaultCallingConv(DefaultCC);
- }
-
- Opts.SemanticInterposition = Args.hasArg(OPT_fsemantic_interposition);
- // An explicit -fno-semantic-interposition infers dso_local.
- Opts.ExplicitNoSemanticInterposition =
- Args.hasArg(OPT_fno_semantic_interposition);
-
// -mrtd option
if (Arg *A = Args.getLastArg(OPT_mrtd)) {
if (Opts.getDefaultCallingConv() != LangOptions::DCC_None)
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< A->getSpelling() << "-fdefault-calling-conv";
else {
- llvm::Triple T(TargetOpts.Triple);
if (T.getArch() != llvm::Triple::x86)
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< A->getSpelling() << T.getTriple();
@@ -3098,8 +2456,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
- // Check if -fopenmp is specified and set default version to 5.0.
- Opts.OpenMP = Args.hasArg(options::OPT_fopenmp) ? 50 : 0;
// Check if -fopenmp-simd is specified.
bool IsSimdSpecified =
Args.hasFlag(options::OPT_fopenmp_simd, options::OPT_fno_openmp_simd,
@@ -3128,8 +2484,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Add unsupported host targets here:
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
- Diags.Report(diag::err_drv_omp_host_target_not_supported)
- << TargetOpts.Triple;
+ Diags.Report(diag::err_drv_omp_host_target_not_supported) << T.str();
break;
}
}
@@ -3154,28 +2509,32 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.OpenMPCUDAReductionBufNum, Diags);
}
- // Prevent auto-widening the representation of loop counters during an
- // OpenMP collapse clause.
- Opts.OpenMPOptimisticCollapse =
- Args.hasArg(options::OPT_fopenmp_optimistic_collapse) ? 1 : 0;
-
// Get the OpenMP target triples if any.
if (Arg *A = Args.getLastArg(options::OPT_fopenmp_targets_EQ)) {
+ enum ArchPtrSize { Arch16Bit, Arch32Bit, Arch64Bit };
+ auto getArchPtrSize = [](const llvm::Triple &T) {
+ if (T.isArch16Bit())
+ return Arch16Bit;
+ if (T.isArch32Bit())
+ return Arch32Bit;
+ assert(T.isArch64Bit() && "Expected 64-bit architecture");
+ return Arch64Bit;
+ };
for (unsigned i = 0; i < A->getNumValues(); ++i) {
llvm::Triple TT(A->getValue(i));
if (TT.getArch() == llvm::Triple::UnknownArch ||
- !(TT.getArch() == llvm::Triple::aarch64 ||
- TT.getArch() == llvm::Triple::ppc ||
- TT.getArch() == llvm::Triple::ppc64 ||
- TT.getArch() == llvm::Triple::ppc64le ||
+ !(TT.getArch() == llvm::Triple::aarch64 || TT.isPPC() ||
TT.getArch() == llvm::Triple::nvptx ||
TT.getArch() == llvm::Triple::nvptx64 ||
TT.getArch() == llvm::Triple::amdgcn ||
TT.getArch() == llvm::Triple::x86 ||
TT.getArch() == llvm::Triple::x86_64))
Diags.Report(diag::err_drv_invalid_omp_target) << A->getValue(i);
+ else if (getArchPtrSize(T) != getArchPtrSize(TT))
+ Diags.Report(diag::err_drv_incompatible_omp_arch)
+ << A->getValue(i) << T.str();
else
Opts.OMPTargetTriples.push_back(TT);
}
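A standalone sketch (triples assumed, not from this commit) of the new pointer-width compatibility check for OpenMP offload targets: a 64-bit host may only pair with 64-bit device triples.

    #include "llvm/ADT/Triple.h"

    bool isCompatibleOffloadPair() {
      llvm::Triple Host("x86_64-unknown-linux-gnu"); // 64-bit host
      llvm::Triple Dev("nvptx-nvidia-cuda");         // 32-bit device triple
      // Mismatched pointer widths now yield err_drv_incompatible_omp_arch.
      return Host.isArch64Bit() == Dev.isArch64Bit(); // false for this pair
    }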
@@ -3226,47 +2585,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (InlineArg->getOption().matches(options::OPT_fno_inline))
Opts.NoInlineDefine = true;
- Opts.FastMath =
- Args.hasArg(OPT_ffast_math) || Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) ||
- Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
- Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.AllowFPReassoc = Args.hasArg(OPT_mreassociate) ||
- Args.hasArg(OPT_menable_unsafe_fp_math) ||
- Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.NoHonorNaNs =
- Args.hasArg(OPT_menable_no_nans) || Args.hasArg(OPT_ffinite_math_only) ||
- Args.hasArg(OPT_ffast_math) || Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.NoHonorInfs = Args.hasArg(OPT_menable_no_infinities) ||
- Args.hasArg(OPT_ffinite_math_only) ||
- Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.NoSignedZero = Args.hasArg(OPT_fno_signed_zeros) ||
- Args.hasArg(OPT_menable_unsafe_fp_math) ||
- Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_no_signed_zeros) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.AllowRecip = Args.hasArg(OPT_freciprocal_math) ||
- Args.hasArg(OPT_menable_unsafe_fp_math) ||
- Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
- // Currently there's no clang option to enable this individually
- Opts.ApproxFunc = Args.hasArg(OPT_menable_unsafe_fp_math) ||
- Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
-
if (Arg *A = Args.getLastArg(OPT_ffp_contract)) {
StringRef Val = A->getValue();
if (Val == "fast")
@@ -3275,27 +2593,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setDefaultFPContractMode(LangOptions::FPM_On);
else if (Val == "off")
Opts.setDefaultFPContractMode(LangOptions::FPM_Off);
+ else if (Val == "fast-honor-pragmas")
+ Opts.setDefaultFPContractMode(LangOptions::FPM_FastHonorPragmas);
else
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
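For orientation, the complete value-to-mode mapping after this hunk, taken directly from the cases above:

    // -ffp-contract=fast                -> LangOptions::FPM_Fast
    // -ffp-contract=on                  -> LangOptions::FPM_On
    // -ffp-contract=off                 -> LangOptions::FPM_Off
    // -ffp-contract=fast-honor-pragmas  -> LangOptions::FPM_FastHonorPragmas
    // anything else                     -> err_drv_invalid_value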
- if (Args.hasArg(OPT_fexperimental_strict_floating_point))
- Opts.ExpStrictFP = true;
-
- auto FPRM = llvm::RoundingMode::NearestTiesToEven;
- if (Args.hasArg(OPT_frounding_math)) {
- FPRM = llvm::RoundingMode::Dynamic;
- }
- Opts.setFPRoundingMode(FPRM);
-
- if (Args.hasArg(OPT_ftrapping_math)) {
- Opts.setFPExceptionMode(LangOptions::FPE_Strict);
- }
-
- if (Args.hasArg(OPT_fno_trapping_math)) {
- Opts.setFPExceptionMode(LangOptions::FPE_Ignore);
- }
-
LangOptions::FPExceptionModeKind FPEB = LangOptions::FPE_Ignore;
if (Arg *A = Args.getLastArg(OPT_ffp_exception_behavior_EQ)) {
StringRef Val = A->getValue();
@@ -3310,74 +2613,15 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.setFPExceptionMode(FPEB);
- Opts.RetainCommentsFromSystemHeaders =
- Args.hasArg(OPT_fretain_comments_from_system_headers);
-
- unsigned SSP = getLastArgIntValue(Args, OPT_stack_protector, 0, Diags);
- switch (SSP) {
- default:
- Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_stack_protector)->getAsString(Args) << SSP;
- break;
- case 0: Opts.setStackProtector(LangOptions::SSPOff); break;
- case 1: Opts.setStackProtector(LangOptions::SSPOn); break;
- case 2: Opts.setStackProtector(LangOptions::SSPStrong); break;
- case 3: Opts.setStackProtector(LangOptions::SSPReq); break;
- }
-
- if (Arg *A = Args.getLastArg(OPT_ftrivial_auto_var_init)) {
- StringRef Val = A->getValue();
- if (Val == "uninitialized")
- Opts.setTrivialAutoVarInit(
- LangOptions::TrivialAutoVarInitKind::Uninitialized);
- else if (Val == "zero")
- Opts.setTrivialAutoVarInit(LangOptions::TrivialAutoVarInitKind::Zero);
- else if (Val == "pattern")
- Opts.setTrivialAutoVarInit(LangOptions::TrivialAutoVarInitKind::Pattern);
- else
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
- }
-
- if (Arg *A = Args.getLastArg(OPT_ftrivial_auto_var_init_stop_after)) {
- int Val = std::stoi(A->getValue());
- Opts.TrivialAutoVarInitStopAfter = Val;
- }
-
// Parse -fsanitize= arguments.
parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
Diags, Opts.Sanitize);
- // -fsanitize-address-field-padding=N has to be a LangOpt, parse it here.
- Opts.SanitizeAddressFieldPadding =
- getLastArgIntValue(Args, OPT_fsanitize_address_field_padding, 0, Diags);
- Opts.SanitizerBlacklistFiles = Args.getAllArgValues(OPT_fsanitize_blacklist);
std::vector<std::string> systemBlacklists =
Args.getAllArgValues(OPT_fsanitize_system_blacklist);
Opts.SanitizerBlacklistFiles.insert(Opts.SanitizerBlacklistFiles.end(),
systemBlacklists.begin(),
systemBlacklists.end());
- // -fxray-instrument
- Opts.XRayInstrument = Args.hasArg(OPT_fxray_instrument);
- Opts.XRayAlwaysEmitCustomEvents =
- Args.hasArg(OPT_fxray_always_emit_customevents);
- Opts.XRayAlwaysEmitTypedEvents =
- Args.hasArg(OPT_fxray_always_emit_typedevents);
-
- // -fxray-{always,never}-instrument= filenames.
- Opts.XRayAlwaysInstrumentFiles =
- Args.getAllArgValues(OPT_fxray_always_instrument);
- Opts.XRayNeverInstrumentFiles =
- Args.getAllArgValues(OPT_fxray_never_instrument);
- Opts.XRayAttrListFiles = Args.getAllArgValues(OPT_fxray_attr_list);
-
- // -fforce-emit-vtables
- Opts.ForceEmitVTables = Args.hasArg(OPT_fforce_emit_vtables);
-
- // -fallow-editor-placeholders
- Opts.AllowEditorPlaceholders = Args.hasArg(OPT_fallow_editor_placeholders);
-
- Opts.RegisterStaticDestructors = !Args.hasArg(OPT_fno_cxx_static_destructors);
-
if (Arg *A = Args.getLastArg(OPT_fclang_abi_compat_EQ)) {
Opts.setClangABICompat(LangOptions::ClangABI::Latest);
@@ -3405,20 +2649,14 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setClangABICompat(LangOptions::ClangABI::Ver7);
else if (Major <= 9)
Opts.setClangABICompat(LangOptions::ClangABI::Ver9);
+ else if (Major <= 11)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver11);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
}
}
- Opts.CompleteMemberPointers = Args.hasArg(OPT_fcomplete_member_pointers);
- Opts.BuildingPCHWithObjectFile = Args.hasArg(OPT_building_pch_with_obj);
- Opts.PCHInstantiateTemplates = Args.hasArg(OPT_fpch_instantiate_templates);
-
- Opts.MatrixTypes = Args.hasArg(OPT_fenable_matrix);
-
- Opts.MaxTokens = getLastArgIntValue(Args, OPT_fmax_tokens_EQ, 0, Diags);
-
if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
StringRef SignScope = A->getValue();
@@ -3451,16 +2689,15 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
- Opts.BranchTargetEnforcement = Args.hasArg(OPT_mbranch_target_enforce);
- Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
-
- Opts.CompatibilityQualifiedIdBlockParamTypeChecking =
- Args.hasArg(OPT_fcompatibility_qualified_id_block_param_type_checking);
-
- Opts.RelativeCXXABIVTables =
- Args.hasFlag(OPT_fexperimental_relative_cxx_abi_vtables,
- OPT_fno_experimental_relative_cxx_abi_vtables,
- /*default=*/false);
+ std::string ThreadModel =
+ std::string(Args.getLastArgValue(OPT_mthread_model, "posix"));
+ if (ThreadModel != "posix" && ThreadModel != "single")
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_mthread_model)->getAsString(Args) << ThreadModel;
+ Opts.setThreadModel(
+ llvm::StringSwitch<LangOptions::ThreadModelKind>(ThreadModel)
+ .Case("posix", LangOptions::ThreadModelKind::POSIX)
+ .Case("single", LangOptions::ThreadModelKind::Single));
}
static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
@@ -3510,18 +2747,9 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags,
frontend::ActionKind Action) {
- Opts.ImplicitPCHInclude = std::string(Args.getLastArgValue(OPT_include_pch));
Opts.PCHWithHdrStop = Args.hasArg(OPT_pch_through_hdrstop_create) ||
Args.hasArg(OPT_pch_through_hdrstop_use);
- Opts.PCHWithHdrStopCreate = Args.hasArg(OPT_pch_through_hdrstop_create);
- Opts.PCHThroughHeader =
- std::string(Args.getLastArgValue(OPT_pch_through_header_EQ));
- Opts.UsePredefines = !Args.hasArg(OPT_undef);
- Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
- Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
- Opts.AllowPCHWithCompilerErrors = Args.hasArg(OPT_fallow_pch_with_errors);
-
- Opts.DumpDeserializedPCHDecls = Args.hasArg(OPT_dump_deserialized_pch_decls);
+
for (const auto *A : Args.filtered(OPT_error_on_deserialized_pch_decl))
Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue());
@@ -3566,8 +2794,6 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.addMacroUndef(A->getValue());
}
- Opts.MacroIncludes = Args.getAllArgValues(OPT_imacros);
-
// Add the ordered list of -includes.
for (const auto *A : Args.filtered(OPT_include))
Opts.Includes.emplace_back(A->getValue());
@@ -3586,27 +2812,11 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.addRemappedFile(Split.first, Split.second);
}
- if (Arg *A = Args.getLastArg(OPT_fobjc_arc_cxxlib_EQ)) {
- StringRef Name = A->getValue();
- unsigned Library = llvm::StringSwitch<unsigned>(Name)
- .Case("libc++", ARCXX_libcxx)
- .Case("libstdc++", ARCXX_libstdcxx)
- .Case("none", ARCXX_nolib)
- .Default(~0U);
- if (Library == ~0U)
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- else
- Opts.ObjCXXARCStandardLibrary = (ObjCXXARCStandardLibraryKind)Library;
- }
-
// Always avoid lexing editor placeholders when we're just running the
// preprocessor as we never want to emit the
// "editor placeholder in source file" error in PP only mode.
if (isStrictlyPreprocessorAction(Action))
Opts.LexEditorPlaceholders = false;
-
- Opts.SetUpStaticAnalyzer = Args.hasArg(OPT_setup_static_analyzer);
- Opts.DisablePragmaDebugCrash = Args.hasArg(OPT_disable_pragma_debug_crash);
}
static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
@@ -3617,43 +2827,11 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
else
Opts.ShowCPP = 0;
- Opts.ShowComments = Args.hasArg(OPT_C);
- Opts.ShowLineMarkers = !Args.hasArg(OPT_P);
- Opts.ShowMacroComments = Args.hasArg(OPT_CC);
Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
- Opts.ShowIncludeDirectives = Args.hasArg(OPT_dI);
- Opts.RewriteIncludes = Args.hasArg(OPT_frewrite_includes);
- Opts.RewriteImports = Args.hasArg(OPT_frewrite_imports);
- Opts.UseLineDirectives = Args.hasArg(OPT_fuse_line_directives);
}
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- Opts.CodeModel = std::string(Args.getLastArgValue(OPT_mcmodel_EQ, "default"));
- Opts.ABI = std::string(Args.getLastArgValue(OPT_target_abi));
- if (Arg *A = Args.getLastArg(OPT_meabi)) {
- StringRef Value = A->getValue();
- llvm::EABI EABIVersion = llvm::StringSwitch<llvm::EABI>(Value)
- .Case("default", llvm::EABI::Default)
- .Case("4", llvm::EABI::EABI4)
- .Case("5", llvm::EABI::EABI5)
- .Case("gnu", llvm::EABI::GNU)
- .Default(llvm::EABI::Unknown);
- if (EABIVersion == llvm::EABI::Unknown)
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
- << Value;
- else
- Opts.EABIVersion = EABIVersion;
- }
- Opts.CPU = std::string(Args.getLastArgValue(OPT_target_cpu));
- Opts.FPMath = std::string(Args.getLastArgValue(OPT_mfpmath));
- Opts.FeaturesAsWritten = Args.getAllArgValues(OPT_target_feature);
- Opts.LinkerVersion =
- std::string(Args.getLastArgValue(OPT_target_linker_version));
- Opts.OpenCLExtensionsAsWritten = Args.getAllArgValues(OPT_cl_ext_EQ);
- Opts.ForceEnableInt128 = Args.hasArg(OPT_fforce_enable_int128);
- Opts.NVPTXUseShortPointers = Args.hasFlag(
- options::OPT_fcuda_short_ptr, options::OPT_fno_cuda_short_ptr, false);
if (Arg *A = Args.getLastArg(options::OPT_target_sdk_version_EQ)) {
llvm::VersionTuple Version;
if (Version.tryParse(A->getValue()))
@@ -3664,31 +2842,6 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
}
}
-bool CompilerInvocation::parseSimpleArgs(const ArgList &Args,
- DiagnosticsEngine &Diags) {
-#define OPTION_WITH_MARSHALLING_FLAG(PREFIX_TYPE, NAME, ID, KIND, GROUP, \
- ALIAS, ALIASARGS, FLAGS, PARAM, HELPTEXT, \
- METAVAR, VALUES, SPELLING, ALWAYS_EMIT, \
- KEYPATH, DEFAULT_VALUE, IS_POSITIVE) \
- this->KEYPATH = Args.hasArg(OPT_##ID) && IS_POSITIVE;
-
-#define OPTION_WITH_MARSHALLING_STRING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- TYPE, NORMALIZER, DENORMALIZER, TABLE_INDEX) \
- { \
- if (auto MaybeValue = NORMALIZER(OPT_##ID, TABLE_INDEX, Args, Diags)) \
- this->KEYPATH = static_cast<TYPE>(*MaybeValue); \
- else \
- this->KEYPATH = DEFAULT_VALUE; \
- }
-
-#include "clang/Driver/Options.inc"
-#undef OPTION_WITH_MARSHALLING_STRING
-#undef OPTION_WITH_MARSHALLING_FLAG
- return true;
-}
-
bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
ArrayRef<const char *> CommandLineArgs,
DiagnosticsEngine &Diags,
@@ -3724,11 +2877,7 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Success &= Res.parseSimpleArgs(Args, Diags);
- llvm::sys::Process::UseANSIEscapeCodes(
- Res.DiagnosticOpts->UseANSIEscapeCodes);
-
Success &= ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
- Success &= ParseMigratorArgs(Res.getMigratorOpts(), Args);
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), Args);
if (!Res.getDependencyOutputOpts().OutputFile.empty() &&
Res.getDependencyOutputOpts().Targets.empty()) {
@@ -3737,17 +2886,13 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
}
Success &= ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
/*DefaultDiagColor=*/false);
- ParseCommentArgs(LangOpts.CommentOpts, Args);
- ParseFileSystemArgs(Res.getFileSystemOpts(), Args);
// FIXME: We shouldn't have to pass the DashX option around here
InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags,
LangOpts.IsHeaderFile);
ParseTargetArgs(Res.getTargetOpts(), Args, Diags);
- Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags,
- Res.getTargetOpts(), Res.getFrontendOpts());
+ llvm::Triple T(Res.getTargetOpts().Triple);
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args,
Res.getFileSystemOpts().WorkingDir);
- llvm::Triple T(Res.getTargetOpts().Triple);
if (DashX.getFormat() == InputKind::Precompiled ||
DashX.getLanguage() == Language::LLVM_IR) {
// ObjCAAutoRefCount and Sanitize LangOpts are used to setup the
@@ -3764,8 +2909,8 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
} else {
// Other LangOpts are only initialized when the input is not AST or LLVM IR.
// FIXME: Should we really be calling this for an Language::Asm input?
- ParseLangArgs(LangOpts, Args, DashX, Res.getTargetOpts(),
- Res.getPreprocessorOpts(), Diags);
+ ParseLangArgs(LangOpts, Args, DashX, T, Res.getPreprocessorOpts().Includes,
+ Diags);
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
LangOpts.ObjCExceptions = 1;
if (T.isOSDarwin() && DashX.isPreprocessed()) {
@@ -3776,12 +2921,6 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
}
}
- if (Diags.isIgnored(diag::warn_profile_data_misexpect, SourceLocation()))
- Res.FrontendOpts.LLVMArgs.push_back("-pgo-warn-misexpect");
-
- LangOpts.FunctionAlignment =
- getLastArgIntValue(Args, OPT_function_alignment, 0, Diags);
-
if (LangOpts.CUDA) {
// During CUDA device-side compilation, the aux triple is the
// triple used for host compilation.
@@ -3793,6 +2932,9 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
if (LangOpts.OpenMPIsDevice)
Res.getTargetOpts().HostTriple = Res.getFrontendOpts().AuxTriple;
+ Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags, T,
+ Res.getFrontendOpts().OutputFile, LangOpts);
+
// FIXME: Override value name discarding when asan or msan is used because the
// backend passes depend on the name of the alloca in order to print out
// names.
@@ -3822,6 +2964,8 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getCodeGenOpts().Argv0 = Argv0;
Res.getCodeGenOpts().CommandLineArgs = CommandLineArgs;
+ FixupInvocation(Res, Diags, Args, DashX);
+
return Success;
}
@@ -3861,7 +3005,7 @@ std::string CompilerInvocation::getModuleHash() const {
// Extend the signature with the target options.
code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
- TargetOpts->ABI);
+ TargetOpts->TuneCPU, TargetOpts->ABI);
for (const auto &FeatureAsWritten : TargetOpts->FeaturesAsWritten)
code = hash_combine(code, FeatureAsWritten);
@@ -3940,41 +3084,57 @@ std::string CompilerInvocation::getModuleHash() const {
void CompilerInvocation::generateCC1CommandLine(
SmallVectorImpl<const char *> &Args, StringAllocator SA) const {
-#define OPTION_WITH_MARSHALLING_FLAG(PREFIX_TYPE, NAME, ID, KIND, GROUP, \
- ALIAS, ALIASARGS, FLAGS, PARAM, HELPTEXT, \
- METAVAR, VALUES, SPELLING, ALWAYS_EMIT, \
- KEYPATH, DEFAULT_VALUE, IS_POSITIVE) \
- if ((FLAGS) & options::CC1Option && \
- (ALWAYS_EMIT || this->KEYPATH != DEFAULT_VALUE)) \
- Args.push_back(SPELLING);
-
-#define OPTION_WITH_MARSHALLING_STRING( \
+ // Capture the extracted value as a lambda argument to avoid potential issues
+ // with lifetime extension of the reference.
+#define GENERATE_OPTION_WITH_MARSHALLING( \
+ ARGS, STRING_ALLOCATOR, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, \
+ TABLE_INDEX) \
+ if ((FLAGS)&options::CC1Option) { \
+ [&](const auto &Extracted) { \
+ if (ALWAYS_EMIT || \
+ (Extracted != \
+ static_cast<decltype(KEYPATH)>((IMPLIED_CHECK) ? (IMPLIED_VALUE) \
+ : (DEFAULT_VALUE)))) \
+ DENORMALIZER(ARGS, SPELLING, STRING_ALLOCATOR, Option::KIND##Class, \
+ TABLE_INDEX, Extracted); \
+ }(EXTRACTOR(KEYPATH)); \
+ }
+
+#define OPTION_WITH_MARSHALLING( \
PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- NORMALIZER_RET_TY, NORMALIZER, DENORMALIZER, TABLE_INDEX) \
- if (((FLAGS) & options::CC1Option) && \
- (ALWAYS_EMIT || this->KEYPATH != DEFAULT_VALUE)) { \
- if (Option::KIND##Class == Option::SeparateClass) { \
- Args.push_back(SPELLING); \
- Args.push_back(DENORMALIZER(SA, TABLE_INDEX, this->KEYPATH)); \
- } \
- }
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, KIND, FLAGS, SPELLING, \
+ ALWAYS_EMIT, this->KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, \
+ EXTRACTOR, TABLE_INDEX)
+
+#define DIAG_OPTION_WITH_MARSHALLING OPTION_WITH_MARSHALLING
+#define LANG_OPTION_WITH_MARSHALLING OPTION_WITH_MARSHALLING
+#define CODEGEN_OPTION_WITH_MARSHALLING OPTION_WITH_MARSHALLING
#include "clang/Driver/Options.inc"
-#undef OPTION_WITH_MARSHALLING_STRING
-#undef OPTION_WITH_MARSHALLING_FLAG
-}
-namespace clang {
+#undef CODEGEN_OPTION_WITH_MARSHALLING
+#undef LANG_OPTION_WITH_MARSHALLING
+#undef DIAG_OPTION_WITH_MARSHALLING
+#undef OPTION_WITH_MARSHALLING
+#undef GENERATE_OPTION_WITH_MARSHALLING
+
+ GenerateLangArgs(*LangOpts, Args, SA);
+}
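A self-contained sketch (simplified; names assumed) of the lambda-argument idiom used in GENERATE_OPTION_WITH_MARSHALLING above: binding a possibly-temporary extracted value to a reference local would depend on lifetime-extension rules, whereas passing it as a call argument keeps it alive for the whole call unconditionally.

    #include <string>

    std::string extract() { return "extracted"; } // may return a temporary

    void emitOption() {
      [](const auto &Extracted) {
        // The temporary bound to Extracted lives until this call returns,
        // with no reliance on reference lifetime extension.
        (void)Extracted; // ... compare against the default, emit the option ...
      }(extract());
    }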
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
-createVFSFromCompilerInvocation(const CompilerInvocation &CI,
- DiagnosticsEngine &Diags) {
+clang::createVFSFromCompilerInvocation(const CompilerInvocation &CI,
+ DiagnosticsEngine &Diags) {
return createVFSFromCompilerInvocation(CI, Diags,
llvm::vfs::getRealFileSystem());
}
-IntrusiveRefCntPtr<llvm::vfs::FileSystem> createVFSFromCompilerInvocation(
+IntrusiveRefCntPtr<llvm::vfs::FileSystem>
+clang::createVFSFromCompilerInvocation(
const CompilerInvocation &CI, DiagnosticsEngine &Diags,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
if (CI.getHeaderSearchOpts().VFSOverlayFiles.empty())
@@ -4002,5 +3162,3 @@ IntrusiveRefCntPtr<llvm::vfs::FileSystem> createVFSFromCompilerInvocation(
}
return Result;
}
-
-} // namespace clang
diff --git a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 1d5a6c06b34f..ff0aa6faf33f 100644
--- a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -40,8 +40,8 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
Args.push_back("-fsyntax-only");
// FIXME: We shouldn't have to pass in the path info.
- driver::Driver TheDriver(Args[0], llvm::sys::getDefaultTargetTriple(),
- *Diags, VFS);
+ driver::Driver TheDriver(Args[0], llvm::sys::getDefaultTargetTriple(), *Diags,
+ "clang LLVM compiler", VFS);
// Don't check that inputs exist, they may have been remapped.
TheDriver.setCheckInputsExist(false);
diff --git a/clang/lib/Frontend/DependencyFile.cpp b/clang/lib/Frontend/DependencyFile.cpp
index c9240f4122a7..fe8ab7197400 100644
--- a/clang/lib/Frontend/DependencyFile.cpp
+++ b/clang/lib/Frontend/DependencyFile.cpp
@@ -46,17 +46,12 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
// Dependency generation really does want to go all the way to the
// file entry for a source location to find out what is depended on.
// We do not want #line markers to affect dependency generation!
- Optional<FileEntryRef> File =
- SM.getFileEntryRefForID(SM.getFileID(SM.getExpansionLoc(Loc)));
- if (!File)
- return;
-
- StringRef Filename =
- llvm::sys::path::remove_leading_dotslash(File->getName());
-
- DepCollector.maybeAddDependency(Filename, /*FromModule*/false,
- isSystem(FileType),
- /*IsModuleFile*/false, /*IsMissing*/false);
+ if (Optional<StringRef> Filename = SM.getNonBuiltinFilenameForID(
+ SM.getFileID(SM.getExpansionLoc(Loc))))
+ DepCollector.maybeAddDependency(
+ llvm::sys::path::remove_leading_dotslash(*Filename),
+ /*FromModule*/ false, isSystem(FileType), /*IsModuleFile*/ false,
+ /*IsMissing*/ false);
}
void FileSkipped(const FileEntryRef &SkippedFile, const Token &FilenameTok,
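
The rewritten callback above folds the lookup, the validity test, and the use into one if statement over an Optional. A small sketch of the idiom, with a hypothetical lookupName() standing in for SourceManager::getNonBuiltinFilenameForID:

    #include "llvm/ADT/Optional.h"
    #include "llvm/ADT/StringRef.h"

    // Hypothetical fallible lookup.
    static llvm::Optional<llvm::StringRef> lookupName(bool Present) {
      if (!Present)
        return llvm::None;
      return llvm::StringRef("input.c");
    }

    static size_t nameLength(bool Present) {
      // Declare-and-test in the condition: the body only runs when the
      // Optional is engaged, so no separate early return is needed.
      if (llvm::Optional<llvm::StringRef> Filename = lookupName(Present))
        return Filename->size();
      return 0;
    }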
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 59a968b5c709..11b25b106627 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -121,7 +121,7 @@ public:
= Ctx.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error,
"%0 was deserialized");
Ctx.getDiagnostics().Report(Ctx.getFullLoc(D->getLocation()), DiagID)
- << ND->getNameAsString();
+ << ND;
}
DelegatingDeserializationListener::DeclRead(ID, D);
@@ -233,13 +233,12 @@ static SourceLocation ReadOriginalFileName(CompilerInstance &CI,
auto &SourceMgr = CI.getSourceManager();
auto MainFileID = SourceMgr.getMainFileID();
- bool Invalid = false;
- const auto *MainFileBuf = SourceMgr.getBuffer(MainFileID, &Invalid);
- if (Invalid)
+ auto MainFileBuf = SourceMgr.getBufferOrNone(MainFileID);
+ if (!MainFileBuf)
return SourceLocation();
std::unique_ptr<Lexer> RawLexer(
- new Lexer(MainFileID, MainFileBuf, SourceMgr, CI.getLangOpts()));
+ new Lexer(MainFileID, *MainFileBuf, SourceMgr, CI.getLangOpts()));
// If the first line has the syntax of
//
@@ -450,7 +449,7 @@ static bool loadModuleMapForModuleBuild(CompilerInstance &CI, bool IsSystem,
PresumedModuleMapFile))
return true;
- if (SrcMgr.getBuffer(ModuleMapID)->getBufferSize() == Offset)
+ if (SrcMgr.getBufferOrFake(ModuleMapID).getBufferSize() == Offset)
Offset = 0;
return false;
@@ -625,7 +624,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (auto *File = OldSM.getFileEntryForID(ID))
Input = FrontendInputFile(File->getName(), Kind);
else
- Input = FrontendInputFile(OldSM.getBuffer(ID), Kind);
+ Input = FrontendInputFile(OldSM.getBufferOrFake(ID), Kind);
}
setCurrentInput(Input, std::move(AST));
}
@@ -874,9 +873,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
CI.createPCHExternalASTSource(
CI.getPreprocessorOpts().ImplicitPCHInclude,
- CI.getPreprocessorOpts().DisablePCHValidation,
- CI.getPreprocessorOpts().AllowPCHWithCompilerErrors, DeserialListener,
- DeleteDeserialListener);
+ CI.getPreprocessorOpts().DisablePCHOrModuleValidation,
+ CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
+ DeserialListener, DeleteDeserialListener);
if (!CI.getASTContext().getExternalSource())
goto failure;
}
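
A recurring pattern in this import: SourceManager::getBuffer with an Invalid out-parameter is replaced by getBufferOrNone (returning Optional<MemoryBufferRef>, for callers that can recover) or getBufferOrFake (returning a plain MemoryBufferRef, substituting an empty buffer on failure). A sketch of choosing between them; consume() and lexRange() are hypothetical helpers, only the SourceManager calls mirror the patch:

    #include "clang/Basic/SourceManager.h"
    #include "llvm/ADT/Optional.h"
    #include "llvm/Support/MemoryBuffer.h"

    void consume(llvm::StringRef Text);
    void lexRange(const char *Begin, const char *End);

    void useBuffers(clang::SourceManager &SM, clang::FileID FID) {
      // Recoverable path: the Optional is engaged only when the contents
      // are really available (cf. ReadOriginalFileName above).
      if (llvm::Optional<llvm::MemoryBufferRef> Buf = SM.getBufferOrNone(FID))
        consume(Buf->getBuffer());

      // Must-make-progress path: a fake (empty) buffer is substituted on
      // failure (cf. loadModuleMapForModuleBuild above).
      llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(FID);
      lexRange(FromFile.getBufferStart(), FromFile.getBufferEnd());
    }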
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index 711e7336c820..060cec23acc4 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -136,13 +136,9 @@ bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
std::unique_ptr<llvm::raw_pwrite_stream>
GeneratePCHAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile,
std::string &OutputFile) {
- // We use createOutputFile here because this is exposed via libclang, and we
- // must disable the RemoveFileOnSignal behavior.
- // We use a temporary to avoid race conditions.
- std::unique_ptr<raw_pwrite_stream> OS =
- CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
- /*RemoveFileOnSignal=*/false, InFile,
- /*Extension=*/"", CI.getFrontendOpts().UseTemporary);
+ // Because this is exposed via libclang we must disable RemoveFileOnSignal.
+ std::unique_ptr<raw_pwrite_stream> OS = CI.createDefaultOutputFile(
+ /*Binary=*/true, InFile, /*Extension=*/"", /*RemoveFileOnSignal=*/false);
if (!OS)
return nullptr;
@@ -177,7 +173,8 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
Consumers.push_back(std::make_unique<PCHGenerator>(
CI.getPreprocessor(), CI.getModuleCache(), OutputFile, Sysroot, Buffer,
CI.getFrontendOpts().ModuleFileExtensions,
- /*AllowASTWithErrors=*/false,
+ /*AllowASTWithErrors=*/
+ +CI.getFrontendOpts().AllowPCMWithCompilerErrors,
/*IncludeTimestamps=*/
+CI.getFrontendOpts().BuildingImplicitModule,
/*ShouldCacheASTInMemory=*/
@@ -187,6 +184,11 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
+bool GenerateModuleAction::shouldEraseOutputFiles() {
+ return !getCompilerInstance().getFrontendOpts().AllowPCMWithCompilerErrors &&
+ ASTFrontendAction::shouldEraseOutputFiles();
+}
+
bool GenerateModuleFromModuleMapAction::BeginSourceFileAction(
CompilerInstance &CI) {
if (!CI.getLangOpts().Modules) {
@@ -213,13 +215,10 @@ GenerateModuleFromModuleMapAction::CreateOutputFile(CompilerInstance &CI,
ModuleMapFile);
}
- // We use createOutputFile here because this is exposed via libclang, and we
- // must disable the RemoveFileOnSignal behavior.
- // We use a temporary to avoid race conditions.
- return CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
- /*RemoveFileOnSignal=*/false, InFile,
- /*Extension=*/"", /*UseTemporary=*/true,
- /*CreateMissingDirectories=*/true);
+ // Because this is exposed via libclang we must disable RemoveFileOnSignal.
+ return CI.createDefaultOutputFile(/*Binary=*/true, InFile, /*Extension=*/"",
+ /*RemoveFileOnSignal=*/false,
+ /*CreateMissingDirectories=*/true);
}
bool GenerateModuleInterfaceAction::BeginSourceFileAction(
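
shouldEraseOutputFiles is the FrontendAction hook the new GenerateModuleAction override above uses to keep a PCM on disk when AllowPCMWithCompilerErrors is set. A sketch of the same pattern in a hypothetical action (KeepOutputsAction is illustrative, not part of the patch):

    #include "clang/Frontend/CompilerInstance.h"
    #include "clang/Frontend/FrontendAction.h"
    #include <memory>

    class KeepOutputsAction : public clang::ASTFrontendAction {
      std::unique_ptr<clang::ASTConsumer>
      CreateASTConsumer(clang::CompilerInstance &CI,
                        llvm::StringRef InFile) override;

      bool shouldEraseOutputFiles() override {
        // Keep the (possibly partial) output when errors are tolerated;
        // otherwise defer to the base class, which deletes outputs after
        // a failed compile.
        if (getCompilerInstance().getFrontendOpts().AllowPCMWithCompilerErrors)
          return false;
        return clang::ASTFrontendAction::shouldEraseOutputFiles();
      }
    };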
@@ -261,7 +260,7 @@ bool GenerateHeaderModuleAction::PrepareToExecuteAction(
if (FIF.getKind().getFormat() != InputKind::Source || !FIF.isFile()) {
CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
<< (FIF.isFile() ? FIF.getFile()
- : FIF.getBuffer()->getBufferIdentifier());
+ : FIF.getBuffer().getBufferIdentifier());
return true;
}
@@ -275,7 +274,8 @@ bool GenerateHeaderModuleAction::PrepareToExecuteAction(
// Set that buffer up as our "real" input.
Inputs.clear();
- Inputs.push_back(FrontendInputFile(Buffer.get(), Kind, /*IsSystem*/false));
+ Inputs.push_back(
+ FrontendInputFile(Buffer->getMemBufferRef(), Kind, /*IsSystem*/ false));
return GenerateModuleAction::PrepareToExecuteAction(CI);
}
@@ -297,7 +297,7 @@ bool GenerateHeaderModuleAction::BeginSourceFileAction(
<< Name;
continue;
}
- Headers.push_back({std::string(Name), &FE->getFileEntry()});
+ Headers.push_back({std::string(Name), *FE});
}
HS.getModuleMap().createHeaderModule(CI.getLangOpts().CurrentModule, Headers);
@@ -337,8 +337,8 @@ void VerifyPCHAction::ExecuteAction() {
CI.getPreprocessor(), CI.getModuleCache(), &CI.getASTContext(),
CI.getPCHContainerReader(), CI.getFrontendOpts().ModuleFileExtensions,
Sysroot.empty() ? "" : Sysroot.c_str(),
- /*DisableValidation*/ false,
- /*AllowPCHWithCompilerErrors*/ false,
+ DisableValidationForModuleKind::None,
+ /*AllowASTWithCompilerErrors*/ false,
/*AllowConfigurationMismatch*/ true,
/*ValidateSystemInputs*/ true));
@@ -466,7 +466,10 @@ private:
Entry.Event = BeginInstantiation ? "Begin" : "End";
if (auto *NamedTemplate = dyn_cast_or_null<NamedDecl>(Inst.Entity)) {
llvm::raw_string_ostream OS(Entry.Name);
- NamedTemplate->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ PrintingPolicy Policy = TheSema.Context.getPrintingPolicy();
+ // FIXME: Also ask for FullyQualifiedNames?
+ Policy.SuppressDefaultTemplateArgs = false;
+ NamedTemplate->getNameForDiagnostic(OS, Policy, true);
const PresumedLoc DefLoc =
TheSema.getSourceManager().getPresumedLoc(Inst.Entity->getLocation());
if(!DefLoc.isInvalid())
@@ -561,6 +564,7 @@ namespace {
Out.indent(2) << "Target options:\n";
Out.indent(4) << " Triple: " << TargetOpts.Triple << "\n";
Out.indent(4) << " CPU: " << TargetOpts.CPU << "\n";
+ Out.indent(4) << " TuneCPU: " << TargetOpts.TuneCPU << "\n";
Out.indent(4) << " ABI: " << TargetOpts.ABI << "\n";
if (!TargetOpts.FeaturesAsWritten.empty()) {
@@ -749,7 +753,7 @@ void DumpRawTokensAction::ExecuteAction() {
SourceManager &SM = PP.getSourceManager();
// Start lexing the specified input file.
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(SM.getMainFileID());
Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
RawLex.SetKeepWhitespaceMode(true);
@@ -804,11 +808,9 @@ void PrintPreprocessedAction::ExecuteAction() {
// concern, so if we scan for too long, we'll just assume the file should
// be opened in binary mode.
bool BinaryMode = true;
- bool InvalidFile = false;
const SourceManager& SM = CI.getSourceManager();
- const llvm::MemoryBuffer *Buffer = SM.getBuffer(SM.getMainFileID(),
- &InvalidFile);
- if (!InvalidFile) {
+ if (llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ SM.getBufferOrNone(SM.getMainFileID())) {
const char *cur = Buffer->getBufferStart();
const char *end = Buffer->getBufferEnd();
const char *next = (cur != end) ? cur + 1 : end;
@@ -936,12 +938,12 @@ void DumpCompilerOptionsAction::ExecuteAction() {
void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
SourceManager &SM = CI.getPreprocessor().getSourceManager();
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(SM.getMainFileID());
llvm::SmallString<1024> Output;
llvm::SmallVector<minimize_source_to_dependency_directives::Token, 32> Toks;
if (minimizeSourceToDependencyDirectives(
- FromFile->getBuffer(), Output, Toks, &CI.getDiagnostics(),
+ FromFile.getBuffer(), Output, Toks, &CI.getDiagnostics(),
SM.getLocForStartOfFile(SM.getMainFileID()))) {
assert(CI.getDiagnostics().hasErrorOccurred() &&
"no errors reported for failure");
diff --git a/clang/lib/Frontend/FrontendOptions.cpp b/clang/lib/Frontend/FrontendOptions.cpp
index 9f080db733f1..4ea13cf0784f 100644
--- a/clang/lib/Frontend/FrontendOptions.cpp
+++ b/clang/lib/Frontend/FrontendOptions.cpp
@@ -29,7 +29,7 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Case("cppm", Language::CXX)
.Case("iim", InputKind(Language::CXX).getPreprocessed())
.Case("cl", Language::OpenCL)
- .Case("cu", Language::CUDA)
+ .Cases("cu", "cuh", Language::CUDA)
.Case("hip", Language::HIP)
.Cases("ll", "bc", Language::LLVM_IR)
.Default(Language::Unknown);
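
getInputKindForExtension is built on llvm::StringSwitch, where .Cases maps several spellings to one result, as the new ("cu", "cuh") pairing does. A minimal sketch with an illustrative table, not Clang's real one:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    enum class Lang { C, CXX, CUDA, Unknown };

    static Lang classify(llvm::StringRef Ext) {
      return llvm::StringSwitch<Lang>(Ext)
          .Case("c", Lang::C)
          .Cases("cc", "cpp", "cxx", Lang::CXX) // several keys, one value
          .Cases("cu", "cuh", Lang::CUDA)
          .Default(Lang::Unknown);
    }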
diff --git a/clang/lib/Frontend/FrontendTiming.cpp b/clang/lib/Frontend/FrontendTiming.cpp
deleted file mode 100644
index e3f44c9999f6..000000000000
--- a/clang/lib/Frontend/FrontendTiming.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//===- FrontendTiming.cpp - Implements Frontend timing utils -------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file keeps the implementation of frontend timing utils.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Frontend/Utils.h"
-
-namespace clang {
-
-bool FrontendTimesIsEnabled = false;
-
-}
diff --git a/clang/lib/Frontend/InitHeaderSearch.cpp b/clang/lib/Frontend/InitHeaderSearch.cpp
index 16f1f1670e8d..bc31445d6d08 100644
--- a/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -270,6 +270,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::Linux:
case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
+ case llvm::Triple::OpenBSD:
llvm_unreachable("Include management is handled in the driver.");
case llvm::Triple::CloudABI: {
@@ -423,6 +424,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
case llvm::Triple::Emscripten:
case llvm::Triple::Linux:
case llvm::Triple::Hurd:
+ case llvm::Triple::OpenBSD:
case llvm::Triple::Solaris:
case llvm::Triple::WASI:
case llvm::Triple::AIX:
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 6eef1e2376f6..d47ad1b74649 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -376,8 +376,11 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDC_VERSION__", "199409L");
} else {
// -- __cplusplus
+ // FIXME: Use correct value for C++23.
+ if (LangOpts.CPlusPlus2b)
+ Builder.defineMacro("__cplusplus", "202101L");
// [C++20] The integer literal 202002L.
- if (LangOpts.CPlusPlus20)
+ else if (LangOpts.CPlusPlus20)
Builder.defineMacro("__cplusplus", "202002L");
// [C++17] The integer literal 201703L.
else if (LangOpts.CPlusPlus17)
@@ -403,6 +406,12 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDCPP_DEFAULT_NEW_ALIGNMENT__",
Twine(TI.getNewAlign() / TI.getCharWidth()) +
TI.getTypeConstantSuffix(TI.getSizeType()));
+
+ // -- __STDCPP_THREADS__
+ // Defined, and has the value integer literal 1, if and only if a
+ // program can have more than one thread of execution.
+ if (LangOpts.getThreadModel() == LangOptions::ThreadModelKind::POSIX)
+ Builder.defineMacro("__STDCPP_THREADS__", "1");
}
// In C11 these are environment macros. In C++11 they are only defined
@@ -445,6 +454,9 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
case 200:
Builder.defineMacro("__OPENCL_C_VERSION__", "200");
break;
+ case 300:
+ Builder.defineMacro("__OPENCL_C_VERSION__", "300");
+ break;
default:
llvm_unreachable("Unsupported OpenCL version");
}
@@ -453,6 +465,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("CL_VERSION_1_1", "110");
Builder.defineMacro("CL_VERSION_1_2", "120");
Builder.defineMacro("CL_VERSION_2_0", "200");
+ Builder.defineMacro("CL_VERSION_3_0", "300");
if (TI.isLittleEndian())
Builder.defineMacro("__ENDIAN_LITTLE__");
@@ -463,7 +476,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
if (LangOpts.SYCL) {
// SYCL Version is set to a value when building SYCL applications
- if (LangOpts.SYCLVersion == 2017)
+ if (LangOpts.getSYCLVersion() == LangOptions::SYCL_2017)
Builder.defineMacro("CL_SYCL_LANGUAGE_VERSION", "121");
}
@@ -552,7 +565,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_aggregate_bases", "201603L");
Builder.defineMacro("__cpp_structured_bindings", "201606L");
Builder.defineMacro("__cpp_nontype_template_args",
- "201411L"); // (not latest)
+ LangOpts.CPlusPlus20 ? "201911L" : "201411L");
Builder.defineMacro("__cpp_fold_expressions", "201603L");
Builder.defineMacro("__cpp_guaranteed_copy_elision", "201606L");
Builder.defineMacro("__cpp_nontype_template_parameter_auto", "201606L");
@@ -740,12 +753,12 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.GNUCVersion && LangOpts.RTTI)
Builder.defineMacro("__GXX_RTTI");
- if (LangOpts.SjLjExceptions)
+ if (LangOpts.hasSjLjExceptions())
Builder.defineMacro("__USING_SJLJ_EXCEPTIONS__");
- else if (LangOpts.SEHExceptions)
+ else if (LangOpts.hasSEHExceptions())
Builder.defineMacro("__SEH__");
- else if (LangOpts.DWARFExceptions &&
- (TI.getTriple().isThumb() || TI.getTriple().isARM()))
+ else if (LangOpts.hasDWARFExceptions() &&
+ (TI.getTriple().isThumb() || TI.getTriple().isARM()))
Builder.defineMacro("__ARM_DWARF_EH__");
if (LangOpts.Deprecated)
@@ -1107,10 +1120,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// OpenCL definitions.
if (LangOpts.OpenCL) {
-#define OPENCLEXT(Ext) \
- if (TI.getSupportedOpenCLOpts().isSupported(#Ext, LangOpts)) \
- Builder.defineMacro(#Ext);
-#include "clang/Basic/OpenCLExtensions.def"
+ TI.getOpenCLFeatureDefines(LangOpts, Builder);
if (TI.getTriple().isSPIR())
Builder.defineMacro("__IMAGE_SUPPORT__");
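
From the user side, the predefines added above are observable with ordinary preprocessor tests; a short illustrative snippet (the 202101L value for -std=c++2b is a placeholder, per the FIXME in the hunk):

    #if defined(__STDCPP_THREADS__)
    // Compiled with a POSIX thread model: more than one thread of
    // execution is possible.
    #endif

    #if defined(__OPENCL_C_VERSION__) && __OPENCL_C_VERSION__ >= 300
    // OpenCL C 3.0 or later (CL_VERSION_3_0 is also defined as 300).
    #endif

    #if defined(__cplusplus) && __cplusplus > 202002L
    // A post-C++20 dialect, e.g. -std=c++2b with this patch.
    #endif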
diff --git a/clang/lib/Frontend/ModuleDependencyCollector.cpp b/clang/lib/Frontend/ModuleDependencyCollector.cpp
index b54eb97d6c47..2e4e64f827b0 100644
--- a/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -156,72 +156,32 @@ void ModuleDependencyCollector::writeFileMap() {
VFSWriter.write(OS);
}
-bool ModuleDependencyCollector::getRealPath(StringRef SrcPath,
- SmallVectorImpl<char> &Result) {
- using namespace llvm::sys;
- SmallString<256> RealPath;
- StringRef FileName = path::filename(SrcPath);
- std::string Dir = path::parent_path(SrcPath).str();
- auto DirWithSymLink = SymLinkMap.find(Dir);
-
- // Use real_path to fix any symbolic link component present in a path.
- // Computing the real path is expensive, cache the search through the
- // parent path directory.
- if (DirWithSymLink == SymLinkMap.end()) {
- if (llvm::sys::fs::real_path(Dir, RealPath))
- return false;
- SymLinkMap[Dir] = std::string(RealPath.str());
- } else {
- RealPath = DirWithSymLink->second;
- }
-
- path::append(RealPath, FileName);
- Result.swap(RealPath);
- return true;
-}
-
std::error_code ModuleDependencyCollector::copyToRoot(StringRef Src,
StringRef Dst) {
using namespace llvm::sys;
+ llvm::FileCollector::PathCanonicalizer::PathStorage Paths =
+ Canonicalizer.canonicalize(Src);
- // We need an absolute src path to append to the root.
- SmallString<256> AbsoluteSrc = Src;
- fs::make_absolute(AbsoluteSrc);
- // Canonicalize src to a native path to avoid mixed separator styles.
- path::native(AbsoluteSrc);
- // Remove redundant leading "./" pieces and consecutive separators.
- AbsoluteSrc = path::remove_leading_dotslash(AbsoluteSrc);
-
- // Canonicalize the source path by removing "..", "." components.
- SmallString<256> VirtualPath = AbsoluteSrc;
- path::remove_dots(VirtualPath, /*remove_dot_dot=*/true);
-
- // If a ".." component is present after a symlink component, remove_dots may
- // lead to the wrong real destination path. Let the source be canonicalized
- // like that but make sure we always use the real path for the destination.
- SmallString<256> CopyFrom;
- if (!getRealPath(AbsoluteSrc, CopyFrom))
- CopyFrom = VirtualPath;
SmallString<256> CacheDst = getDest();
if (Dst.empty()) {
// The common case is to map the virtual path to the same path inside the
// cache.
- path::append(CacheDst, path::relative_path(CopyFrom));
+ path::append(CacheDst, path::relative_path(Paths.CopyFrom));
} else {
// When collecting entries from input vfsoverlays, copy the external
// contents into the cache but still map from the source.
if (!fs::exists(Dst))
return std::error_code();
path::append(CacheDst, Dst);
- CopyFrom = Dst;
+ Paths.CopyFrom = Dst;
}
// Copy the file into place.
if (std::error_code EC = fs::create_directories(path::parent_path(CacheDst),
/*IgnoreExisting=*/true))
return EC;
- if (std::error_code EC = fs::copy_file(CopyFrom, CacheDst))
+ if (std::error_code EC = fs::copy_file(Paths.CopyFrom, CacheDst))
return EC;
// Always map a canonical src path to its real path into the YAML, by doing
@@ -229,7 +189,7 @@ std::error_code ModuleDependencyCollector::copyToRoot(StringRef Src,
// overlay, which is a way to emulate symlink inside the VFS; this is also
// needed for correctness, not doing that can lead to module redefinition
// errors.
- addFileMapping(VirtualPath, CacheDst);
+ addFileMapping(Paths.VirtualPath, CacheDst);
return std::error_code();
}
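
The deleted getRealPath above cached llvm::sys::fs::real_path results per parent directory by hand; that logic now lives in llvm::FileCollector::PathCanonicalizer, which hands back both paths the collector needs. A hedged usage sketch, assuming the canonicalizer can be constructed directly as the new Canonicalizer member is:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/FileCollector.h"

    void canonicalizeExample(llvm::StringRef Src) {
      // The canonicalizer caches real_path lookups per parent directory
      // internally, as the removed SymLinkMap did.
      llvm::FileCollector::PathCanonicalizer Canonicalizer;
      llvm::FileCollector::PathCanonicalizer::PathStorage Paths =
          Canonicalizer.canonicalize(Src);
      // Paths.VirtualPath: absolute, dot-removed path, used as the key in
      // the VFS mapping. Paths.CopyFrom: symlink-resolved path, used as
      // the source of the actual file copy.
      (void)Paths;
    }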
diff --git a/clang/lib/Frontend/PrecompiledPreamble.cpp b/clang/lib/Frontend/PrecompiledPreamble.cpp
index 6cdfc595dcae..77b93713ce68 100644
--- a/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -208,6 +208,11 @@ public:
Callbacks.AfterPCHEmitted(Writer);
}
+ bool BeginSourceFileAction(CompilerInstance &CI) override {
+ assert(CI.getLangOpts().CompilingPCH);
+ return ASTFrontendAction::BeginSourceFileAction(CI);
+ }
+
bool shouldEraseOutputFiles() override { return !hasEmittedPreamblePCH(); }
bool hasCodeCompletionSupport() const override { return false; }
bool hasASTFileSupport() const override { return false; }
@@ -298,9 +303,9 @@ template <class T> bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
} // namespace
PreambleBounds clang::ComputePreambleBounds(const LangOptions &LangOpts,
- const llvm::MemoryBuffer *Buffer,
+ const llvm::MemoryBufferRef &Buffer,
unsigned MaxLines) {
- return Lexer::ComputePreamble(Buffer->getBuffer(), LangOpts, MaxLines);
+ return Lexer::ComputePreamble(Buffer.getBuffer(), LangOpts, MaxLines);
}
llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
@@ -396,6 +401,8 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
auto PreambleDepCollector = std::make_shared<PreambleDependencyCollector>();
Clang->addDependencyCollector(PreambleDepCollector);
+ Clang->getLangOpts().CompilingPCH = true;
+
// Remap the main source file to the preamble buffer.
StringRef MainFilePath = FrontendOpts.Inputs[0].getFile();
auto PreambleInputBuffer = llvm::MemoryBuffer::getMemBufferCopy(
@@ -456,7 +463,8 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
PrecompiledPreamble::PreambleFileHash::createForFile(File->getSize(),
ModTime);
} else {
- const llvm::MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File);
+ llvm::MemoryBufferRef Buffer =
+ SourceMgr.getMemoryBufferForFileOrFake(File);
FilesInPreamble[File->getName()] =
PrecompiledPreamble::PreambleFileHash::createForMemoryBuffer(Buffer);
}
@@ -493,12 +501,12 @@ std::size_t PrecompiledPreamble::getSize() const {
}
bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
- const llvm::MemoryBuffer *MainFileBuffer,
+ const llvm::MemoryBufferRef &MainFileBuffer,
PreambleBounds Bounds,
- llvm::vfs::FileSystem *VFS) const {
+ llvm::vfs::FileSystem &VFS) const {
assert(
- Bounds.Size <= MainFileBuffer->getBufferSize() &&
+ Bounds.Size <= MainFileBuffer.getBufferSize() &&
"Buffer is too large. Bounds were calculated from a different buffer?");
auto PreambleInvocation = std::make_shared<CompilerInvocation>(Invocation);
@@ -512,7 +520,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
if (PreambleBytes.size() != Bounds.Size ||
PreambleEndsAtStartOfLine != Bounds.PreambleEndsAtStartOfLine ||
!std::equal(PreambleBytes.begin(), PreambleBytes.end(),
- MainFileBuffer->getBuffer().begin()))
+ MainFileBuffer.getBuffer().begin()))
return false;
// The preamble has not changed. We may be able to re-use the precompiled
// preamble.
@@ -524,14 +532,14 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
llvm::StringSet<> OverriddenAbsPaths; // Either by buffers or files.
for (const auto &R : PreprocessorOpts.RemappedFiles) {
llvm::vfs::Status Status;
- if (!moveOnNoError(VFS->status(R.second), Status)) {
+ if (!moveOnNoError(VFS.status(R.second), Status)) {
// If we can't stat the file we're remapping to, assume that something
// horrible happened.
return false;
}
// If a mapped file was previously missing, then it has changed.
llvm::SmallString<128> MappedPath(R.first);
- if (!VFS->makeAbsolute(MappedPath))
+ if (!VFS.makeAbsolute(MappedPath))
OverriddenAbsPaths.insert(MappedPath);
OverriddenFiles[Status.getUniqueID()] = PreambleFileHash::createForFile(
@@ -542,15 +550,15 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
llvm::StringMap<PreambleFileHash> OverridenFileBuffers;
for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
const PrecompiledPreamble::PreambleFileHash PreambleHash =
- PreambleFileHash::createForMemoryBuffer(RB.second);
+ PreambleFileHash::createForMemoryBuffer(RB.second->getMemBufferRef());
llvm::vfs::Status Status;
- if (moveOnNoError(VFS->status(RB.first), Status))
+ if (moveOnNoError(VFS.status(RB.first), Status))
OverriddenFiles[Status.getUniqueID()] = PreambleHash;
else
OverridenFileBuffers[RB.first] = PreambleHash;
llvm::SmallString<128> MappedPath(RB.first);
- if (!VFS->makeAbsolute(MappedPath))
+ if (!VFS.makeAbsolute(MappedPath))
OverriddenAbsPaths.insert(MappedPath);
}
@@ -566,7 +574,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
}
llvm::vfs::Status Status;
- if (!moveOnNoError(VFS->status(F.first()), Status)) {
+ if (!moveOnNoError(VFS.status(F.first()), Status)) {
// If the file's buffer is not remapped and we can't stat it,
// assume that something horrible happened.
return false;
@@ -595,7 +603,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
return false;
// If a file previously recorded as missing exists as a regular file, then
// consider the preamble out-of-date.
- if (auto Status = VFS->status(F.getKey())) {
+ if (auto Status = VFS.status(F.getKey())) {
if (Status->isRegularFile())
return false;
}
@@ -613,7 +621,7 @@ void PrecompiledPreamble::AddImplicitPreamble(
void PrecompiledPreamble::OverridePreamble(
CompilerInvocation &CI, IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
- auto Bounds = ComputePreambleBounds(*CI.getLangOpts(), MainFileBuffer, 0);
+ auto Bounds = ComputePreambleBounds(*CI.getLangOpts(), *MainFileBuffer, 0);
configurePreamble(Bounds, CI, VFS, MainFileBuffer);
}
@@ -727,7 +735,7 @@ PrecompiledPreamble::PCHStorage::getKind() const {
PrecompiledPreamble::TempPCHFile &PrecompiledPreamble::PCHStorage::asFile() {
assert(getKind() == Kind::TempFile);
- return *reinterpret_cast<TempPCHFile *>(Storage.buffer);
+ return *reinterpret_cast<TempPCHFile *>(&Storage);
}
const PrecompiledPreamble::TempPCHFile &
@@ -738,7 +746,7 @@ PrecompiledPreamble::PCHStorage::asFile() const {
PrecompiledPreamble::InMemoryPreamble &
PrecompiledPreamble::PCHStorage::asMemory() {
assert(getKind() == Kind::InMemory);
- return *reinterpret_cast<InMemoryPreamble *>(Storage.buffer);
+ return *reinterpret_cast<InMemoryPreamble *>(&Storage);
}
const PrecompiledPreamble::InMemoryPreamble &
@@ -776,13 +784,13 @@ PrecompiledPreamble::PreambleFileHash::createForFile(off_t Size,
PrecompiledPreamble::PreambleFileHash
PrecompiledPreamble::PreambleFileHash::createForMemoryBuffer(
- const llvm::MemoryBuffer *Buffer) {
+ const llvm::MemoryBufferRef &Buffer) {
PreambleFileHash Result;
- Result.Size = Buffer->getBufferSize();
+ Result.Size = Buffer.getBufferSize();
Result.ModTime = 0;
llvm::MD5 MD5Ctx;
- MD5Ctx.update(Buffer->getBuffer().data());
+ MD5Ctx.update(Buffer.getBuffer().data());
MD5Ctx.final(Result.MD5);
return Result;
@@ -804,7 +812,8 @@ void PrecompiledPreamble::configurePreamble(
PreprocessorOpts.PrecompiledPreambleBytes.first = Bounds.Size;
PreprocessorOpts.PrecompiledPreambleBytes.second =
Bounds.PreambleEndsAtStartOfLine;
- PreprocessorOpts.DisablePCHValidation = true;
+ PreprocessorOpts.DisablePCHOrModuleValidation =
+ DisableValidationForModuleKind::PCH;
setupPreambleStorage(Storage, PreprocessorOpts, VFS);
}
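
createForMemoryBuffer above fingerprints an in-memory file as its size plus an MD5 of the contents. A sketch of the digest step with llvm::MD5; this version hashes the whole StringRef, whereas the hunk passes Buffer.getBuffer().data(), a bare NUL-terminated C string:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/MD5.h"
    #include "llvm/Support/MemoryBuffer.h"

    static llvm::SmallString<32> digest(const llvm::MemoryBufferRef &Buffer) {
      llvm::MD5 Hash;
      llvm::MD5::MD5Result Result;
      Hash.update(Buffer.getBuffer()); // hash the full buffer contents
      Hash.final(Result);
      llvm::SmallString<32> Hex;
      llvm::MD5::stringifyResult(Result, Hex);
      return Hex;
    }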
diff --git a/clang/lib/Frontend/Rewrite/HTMLPrint.cpp b/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
index 982e56cebbca..1388c2e1faab 100644
--- a/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
+++ b/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
@@ -70,7 +70,7 @@ void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
if (Entry)
Name = Entry->getName();
else
- Name = R.getSourceMgr().getBuffer(FID)->getBufferIdentifier();
+ Name = R.getSourceMgr().getBufferOrFake(FID).getBufferIdentifier();
html::AddLineNumbers(R, FID);
html::AddHeaderFooterInternalBuiltinCSS(R, FID, Name);
diff --git a/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
index dcf645f67f2f..3f2a78127477 100644
--- a/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -40,17 +40,17 @@ class InclusionRewriter : public PPCallbacks {
SourceManager &SM; ///< Used to read and manage source files.
raw_ostream &OS; ///< The destination stream for rewritten contents.
StringRef MainEOL; ///< The line ending marker to use.
- const llvm::MemoryBuffer *PredefinesBuffer; ///< The preprocessor predefines.
+ llvm::MemoryBufferRef PredefinesBuffer; ///< The preprocessor predefines.
bool ShowLineMarkers; ///< Show #line markers.
bool UseLineDirectives; ///< Use of line directives or line markers.
/// Tracks where inclusions that change the file are found.
- std::map<unsigned, IncludedFile> FileIncludes;
+ std::map<SourceLocation, IncludedFile> FileIncludes;
/// Tracks where inclusions that import modules are found.
- std::map<unsigned, const Module *> ModuleIncludes;
+ std::map<SourceLocation, const Module *> ModuleIncludes;
/// Tracks where inclusions that enter modules (in a module build) are found.
- std::map<unsigned, const Module *> ModuleEntryIncludes;
+ std::map<SourceLocation, const Module *> ModuleEntryIncludes;
/// Tracks where #if and #elif directives get evaluated and whether they evaluate to true.
- std::map<unsigned, bool> IfConditions;
+ std::map<SourceLocation, bool> IfConditions;
/// Used transitively for building up the FileIncludes mapping over the
/// various \c PPCallbacks callbacks.
SourceLocation LastInclusionLocation;
@@ -59,14 +59,14 @@ public:
bool UseLineDirectives);
void Process(FileID FileId, SrcMgr::CharacteristicKind FileType,
const DirectoryLookup *DirLookup);
- void setPredefinesBuffer(const llvm::MemoryBuffer *Buf) {
+ void setPredefinesBuffer(const llvm::MemoryBufferRef &Buf) {
PredefinesBuffer = Buf;
}
void detectMainFileEOL();
void handleModuleBegin(Token &Tok) {
assert(Tok.getKind() == tok::annot_module_begin);
- ModuleEntryIncludes.insert({Tok.getLocation().getRawEncoding(),
- (Module *)Tok.getAnnotationValue()});
+ ModuleEntryIncludes.insert(
+ {Tok.getLocation(), (Module *)Tok.getAnnotationValue()});
}
private:
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
@@ -88,12 +88,11 @@ private:
SrcMgr::CharacteristicKind FileType,
StringRef Extra = StringRef());
void WriteImplicitModuleImport(const Module *Mod);
- void OutputContentUpTo(const MemoryBuffer &FromFile,
- unsigned &WriteFrom, unsigned WriteTo,
- StringRef EOL, int &lines,
+ void OutputContentUpTo(const MemoryBufferRef &FromFile, unsigned &WriteFrom,
+ unsigned WriteTo, StringRef EOL, int &lines,
bool EnsureNewline);
void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken,
- const MemoryBuffer &FromFile, StringRef EOL,
+ const MemoryBufferRef &FromFile, StringRef EOL,
unsigned &NextToWrite, int &Lines);
const IncludedFile *FindIncludeAtLocation(SourceLocation Loc) const;
const Module *FindModuleAtLocation(SourceLocation Loc) const;
@@ -109,8 +108,7 @@ InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS,
bool ShowLineMarkers,
bool UseLineDirectives)
: PP(PP), SM(PP.getSourceManager()), OS(OS), MainEOL("\n"),
- PredefinesBuffer(nullptr), ShowLineMarkers(ShowLineMarkers),
- UseLineDirectives(UseLineDirectives),
+ ShowLineMarkers(ShowLineMarkers), UseLineDirectives(UseLineDirectives),
LastInclusionLocation(SourceLocation()) {}
/// Write appropriate line information as either #line directives or GNU line
@@ -164,7 +162,7 @@ void InclusionRewriter::FileChanged(SourceLocation Loc,
return;
FileID Id = FullSourceLoc(Loc, SM).getFileID();
auto P = FileIncludes.insert(
- std::make_pair(LastInclusionLocation.getRawEncoding(),
+ std::make_pair(LastInclusionLocation,
IncludedFile(Id, NewFileType, PP.GetCurDirLookup())));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
@@ -199,8 +197,7 @@ void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
const Module *Imported,
SrcMgr::CharacteristicKind FileType){
if (Imported) {
- auto P = ModuleIncludes.insert(
- std::make_pair(HashLoc.getRawEncoding(), Imported));
+ auto P = ModuleIncludes.insert(std::make_pair(HashLoc, Imported));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
} else
@@ -209,8 +206,7 @@ void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
void InclusionRewriter::If(SourceLocation Loc, SourceRange ConditionRange,
ConditionValueKind ConditionValue) {
- auto P = IfConditions.insert(
- std::make_pair(Loc.getRawEncoding(), ConditionValue == CVK_True));
+ auto P = IfConditions.insert(std::make_pair(Loc, ConditionValue == CVK_True));
(void)P;
assert(P.second && "Unexpected revisitation of the same if directive");
}
@@ -218,8 +214,7 @@ void InclusionRewriter::If(SourceLocation Loc, SourceRange ConditionRange,
void InclusionRewriter::Elif(SourceLocation Loc, SourceRange ConditionRange,
ConditionValueKind ConditionValue,
SourceLocation IfLoc) {
- auto P = IfConditions.insert(
- std::make_pair(Loc.getRawEncoding(), ConditionValue == CVK_True));
+ auto P = IfConditions.insert(std::make_pair(Loc, ConditionValue == CVK_True));
(void)P;
assert(P.second && "Unexpected revisitation of the same elif directive");
}
@@ -228,7 +223,7 @@ void InclusionRewriter::Elif(SourceLocation Loc, SourceRange ConditionRange,
/// an inclusion directive) in the map of inclusion information, FileChanges.
const InclusionRewriter::IncludedFile *
InclusionRewriter::FindIncludeAtLocation(SourceLocation Loc) const {
- const auto I = FileIncludes.find(Loc.getRawEncoding());
+ const auto I = FileIncludes.find(Loc);
if (I != FileIncludes.end())
return &I->second;
return nullptr;
@@ -238,7 +233,7 @@ InclusionRewriter::FindIncludeAtLocation(SourceLocation Loc) const {
/// an inclusion directive) in the map of module inclusion information.
const Module *
InclusionRewriter::FindModuleAtLocation(SourceLocation Loc) const {
- const auto I = ModuleIncludes.find(Loc.getRawEncoding());
+ const auto I = ModuleIncludes.find(Loc);
if (I != ModuleIncludes.end())
return I->second;
return nullptr;
@@ -248,14 +243,14 @@ InclusionRewriter::FindModuleAtLocation(SourceLocation Loc) const {
/// an inclusion directive) in the map of module entry information.
const Module *
InclusionRewriter::FindEnteredModule(SourceLocation Loc) const {
- const auto I = ModuleEntryIncludes.find(Loc.getRawEncoding());
+ const auto I = ModuleEntryIncludes.find(Loc);
if (I != ModuleEntryIncludes.end())
return I->second;
return nullptr;
}
bool InclusionRewriter::IsIfAtLocationTrue(SourceLocation Loc) const {
- const auto I = IfConditions.find(Loc.getRawEncoding());
+ const auto I = IfConditions.find(Loc);
if (I != IfConditions.end())
return I->second;
return false;
@@ -263,7 +258,7 @@ bool InclusionRewriter::IsIfAtLocationTrue(SourceLocation Loc) const {
/// Detect the likely line ending style of \p FromFile by examining the first
/// newline found within it.
-static StringRef DetectEOL(const MemoryBuffer &FromFile) {
+static StringRef DetectEOL(const MemoryBufferRef &FromFile) {
// Detect what line endings the file uses, so that added content does not mix
// the style. We need to check for "\r\n" first because "\n\r" will match
// "\r\n\r\n".
@@ -278,23 +273,22 @@ static StringRef DetectEOL(const MemoryBuffer &FromFile) {
}
void InclusionRewriter::detectMainFileEOL() {
- bool Invalid;
- const MemoryBuffer &FromFile = *SM.getBuffer(SM.getMainFileID(), &Invalid);
- assert(!Invalid);
- if (Invalid)
+ Optional<MemoryBufferRef> FromFile = SM.getBufferOrNone(SM.getMainFileID());
+ assert(FromFile);
+ if (!FromFile)
return; // Should never happen, but whatever.
- MainEOL = DetectEOL(FromFile);
+ MainEOL = DetectEOL(*FromFile);
}
/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at
/// \p WriteTo - 1.
-void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
+void InclusionRewriter::OutputContentUpTo(const MemoryBufferRef &FromFile,
unsigned &WriteFrom, unsigned WriteTo,
StringRef LocalEOL, int &Line,
bool EnsureNewline) {
if (WriteTo <= WriteFrom)
return;
- if (&FromFile == PredefinesBuffer) {
+ if (FromFile == PredefinesBuffer) {
// Ignore the #defines of the predefines buffer.
WriteFrom = WriteTo;
return;
@@ -341,7 +335,7 @@ void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
/// through the \p FromFile buffer.
void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
const Token &StartToken,
- const MemoryBuffer &FromFile,
+ const MemoryBufferRef &FromFile,
StringRef LocalEOL,
unsigned &NextToWrite, int &Line) {
OutputContentUpTo(FromFile, NextToWrite,
@@ -351,7 +345,7 @@ void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
do {
DirectiveLex.LexFromRawLexer(DirectiveToken);
} while (!DirectiveToken.is(tok::eod) && DirectiveToken.isNot(tok::eof));
- if (&FromFile == PredefinesBuffer) {
+ if (FromFile == PredefinesBuffer) {
// OutputContentUpTo() would not output anything anyway.
return;
}
@@ -379,11 +373,15 @@ StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex,
void InclusionRewriter::Process(FileID FileId,
SrcMgr::CharacteristicKind FileType,
const DirectoryLookup *DirLookup) {
- bool Invalid;
- const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid);
- assert(!Invalid && "Attempting to process invalid inclusion");
+ MemoryBufferRef FromFile;
+ {
+ auto B = SM.getBufferOrNone(FileId);
+ assert(B && "Attempting to process invalid inclusion");
+ if (B)
+ FromFile = *B;
+ }
StringRef FileName = FromFile.getBufferIdentifier();
- Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts());
+ Lexer RawLex(FileId, FromFile, PP.getSourceManager(), PP.getLangOpts());
RawLex.SetCommentRetentionState(false);
StringRef LocalEOL = DetectEOL(FromFile);
@@ -560,7 +558,7 @@ void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
if (Tok.is(tok::annot_module_begin))
Rewrite->handleModuleBegin(Tok);
} while (Tok.isNot(tok::eof));
- Rewrite->setPredefinesBuffer(SM.getBuffer(PP.getPredefinesFileID()));
+ Rewrite->setPredefinesBuffer(SM.getBufferOrFake(PP.getPredefinesFileID()));
Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User, nullptr);
Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User, nullptr);
OS->flush();
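
All four bookkeeping maps in this file now key on SourceLocation directly rather than on Loc.getRawEncoding(); SourceLocation is totally ordered, so it can serve as a std::map key without round-tripping through unsigned. A minimal sketch mirroring IsIfAtLocationTrue above:

    #include "clang/Basic/SourceLocation.h"
    #include <map>

    using IfConditionMap = std::map<clang::SourceLocation, bool>;

    static bool isTrueAt(const IfConditionMap &M, clang::SourceLocation Loc) {
      auto It = M.find(Loc);
      return It != M.end() && It->second;
    }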
diff --git a/clang/lib/Frontend/Rewrite/RewriteMacros.cpp b/clang/lib/Frontend/Rewrite/RewriteMacros.cpp
index 6b67ee638353..5701b271aff1 100644
--- a/clang/lib/Frontend/Rewrite/RewriteMacros.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteMacros.cpp
@@ -64,7 +64,7 @@ static void LexRawTokensFromMainFile(Preprocessor &PP,
// Create a lexer to lex all the tokens of the main file in raw mode. Even
// though it is in raw mode, it will not return comments.
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(SM.getMainFileID());
Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
// Switch on comment lexing because we really do want them.
diff --git a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index e122b10e76d3..9d5366bb161e 100644
--- a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -586,7 +586,8 @@ namespace {
CastKind Kind, Expr *E) {
TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
- TInfo, SourceLocation(), SourceLocation());
+ FPOptionsOverride(), TInfo,
+ SourceLocation(), SourceLocation());
}
bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
@@ -701,9 +702,9 @@ void RewriteModernObjC::InitializeCommon(ASTContext &context) {
// Get the ID and start/end of the main file.
MainFileID = SM->getMainFileID();
- const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
- MainFileStart = MainBuf->getBufferStart();
- MainFileEnd = MainBuf->getBufferEnd();
+ llvm::MemoryBufferRef MainBuf = SM->getBufferOrFake(MainFileID);
+ MainFileStart = MainBuf.getBufferStart();
+ MainFileEnd = MainBuf.getBufferEnd();
Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
}
@@ -2105,12 +2106,13 @@ RewriteModernObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
// Now, we cast the reference to a pointer to the objc_msgSend type.
QualType pToFunc = Context->getPointerType(msgSendType);
ImplicitCastExpr *ICE =
- ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
- DRE, nullptr, VK_RValue);
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
+ DRE, nullptr, VK_RValue, FPOptionsOverride());
const auto *FT = msgSendType->castAs<FunctionType>();
- CallExpr *Exp = CallExpr::Create(
- *Context, ICE, Args, FT->getCallResultType(*Context), VK_RValue, EndLoc);
+ CallExpr *Exp =
+ CallExpr::Create(*Context, ICE, Args, FT->getCallResultType(*Context),
+ VK_RValue, EndLoc, FPOptionsOverride());
return Exp;
}
@@ -2692,7 +2694,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc);
+ VK_RValue, EndLoc, FPOptionsOverride());
ReplaceStmt(Exp, CE);
return CE;
}
@@ -2732,7 +2734,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
InitExprs.push_back(Exp->getElement(i));
Expr *NSArrayCallExpr =
CallExpr::Create(*Context, NSArrayDRE, InitExprs, NSArrayFType, VK_LValue,
- SourceLocation());
+ SourceLocation(), FPOptionsOverride());
FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -2813,7 +2815,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
const FunctionType *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc);
+ VK_RValue, EndLoc, FPOptionsOverride());
ReplaceStmt(Exp, CE);
return CE;
}
@@ -2861,7 +2863,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
// (const id [])objects
Expr *NSValueCallExpr =
CallExpr::Create(*Context, NSDictDRE, ValueExprs, NSDictFType, VK_LValue,
- SourceLocation());
+ SourceLocation(), FPOptionsOverride());
FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -2879,8 +2881,9 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
CK_BitCast,
DictLiteralValueME);
// (const id <NSCopying> [])keys
- Expr *NSKeyCallExpr = CallExpr::Create(
- *Context, NSDictDRE, KeyExprs, NSDictFType, VK_LValue, SourceLocation());
+ Expr *NSKeyCallExpr =
+ CallExpr::Create(*Context, NSDictDRE, KeyExprs, NSDictFType, VK_LValue,
+ SourceLocation(), FPOptionsOverride());
MemberExpr *DictLiteralKeyME =
MemberExpr::CreateImplicit(*Context, NSKeyCallExpr, false, ARRFD,
@@ -2964,7 +2967,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
const FunctionType *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc);
+ VK_RValue, EndLoc, FPOptionsOverride());
ReplaceStmt(Exp, CE);
return CE;
}
@@ -3175,8 +3178,9 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
ID, FuncType, nullptr, SC_Extern, false, false);
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, FD, false, castType, VK_RValue, SourceLocation());
- CallExpr *STCE = CallExpr::Create(*Context, DRE, MsgExprs, castType,
- VK_LValue, SourceLocation());
+ CallExpr *STCE =
+ CallExpr::Create(*Context, DRE, MsgExprs, castType, VK_LValue,
+ SourceLocation(), FPOptionsOverride());
FieldDecl *FieldD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -3276,8 +3280,9 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
VK_LValue, SourceLocation());
- SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
- VK_LValue, SourceLocation());
+ SuperRep =
+ CallExpr::Create(*Context, DRE, InitExprs, superType, VK_LValue,
+ SourceLocation(), FPOptionsOverride());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -3371,8 +3376,9 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
VK_LValue, SourceLocation());
- SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
- VK_LValue, SourceLocation());
+ SuperRep =
+ CallExpr::Create(*Context, DRE, InitExprs, superType, VK_LValue,
+ SourceLocation(), FPOptionsOverride());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -3537,7 +3543,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
const FunctionType *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc);
+ VK_RValue, EndLoc, FPOptionsOverride());
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -4647,8 +4653,9 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
- CallExpr *CE = CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(),
- VK_RValue, SourceLocation());
+ CallExpr *CE =
+ CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(), VK_RValue,
+ SourceLocation(), FPOptionsOverride());
return CE;
}
@@ -5391,7 +5398,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
InitExprs.push_back(FlagExp);
}
NewRep = CallExpr::Create(*Context, DRE, InitExprs, FType, VK_LValue,
- SourceLocation());
+ SourceLocation(), FPOptionsOverride());
if (GlobalBlockExpr) {
assert (!GlobalConstructionExp &&
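
The mechanical change running through these rewriter hunks is that CallExpr::Create (and the cast-expression Create functions) now takes an FPOptionsOverride describing any pending floating-point pragma state. A sketch of the new call shape; Callee, Args, Ty, and RParenLoc are assumed to be in scope in real code, as in the hunks above:

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Expr.h"

    static clang::CallExpr *makeCall(clang::ASTContext &Ctx,
                                     clang::Expr *Callee,
                                     llvm::ArrayRef<clang::Expr *> Args,
                                     clang::QualType Ty,
                                     clang::SourceLocation RParenLoc) {
      // An empty FPOptionsOverride() means "no pending FP settings",
      // which is what synthesized rewriter code wants; callers inside a
      // pragma-scoped region would pass the current overrides instead.
      return clang::CallExpr::Create(Ctx, Callee, Args, Ty, clang::VK_RValue,
                                     RParenLoc, clang::FPOptionsOverride());
    }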
diff --git a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index 3f320dc57aa6..543b3b09a9cc 100644
--- a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -492,7 +492,8 @@ namespace {
CastKind Kind, Expr *E) {
TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
- TInfo, SourceLocation(), SourceLocation());
+ FPOptionsOverride(), TInfo,
+ SourceLocation(), SourceLocation());
}
StringLiteral *getStringLiteral(StringRef Str) {
@@ -630,9 +631,9 @@ void RewriteObjC::InitializeCommon(ASTContext &context) {
// Get the ID and start/end of the main file.
MainFileID = SM->getMainFileID();
- const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
- MainFileStart = MainBuf->getBufferStart();
- MainFileEnd = MainBuf->getBufferEnd();
+ llvm::MemoryBufferRef MainBuf = SM->getBufferOrFake(MainFileID);
+ MainFileStart = MainBuf.getBufferStart();
+ MainFileEnd = MainBuf.getBufferEnd();
Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
}
@@ -2022,13 +2023,14 @@ RewriteObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
// Now, we cast the reference to a pointer to the objc_msgSend type.
QualType pToFunc = Context->getPointerType(msgSendType);
ImplicitCastExpr *ICE =
- ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
- DRE, nullptr, VK_RValue);
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
+ DRE, nullptr, VK_RValue, FPOptionsOverride());
const auto *FT = msgSendType->castAs<FunctionType>();
- CallExpr *Exp = CallExpr::Create(
- *Context, ICE, Args, FT->getCallResultType(*Context), VK_RValue, EndLoc);
+ CallExpr *Exp =
+ CallExpr::Create(*Context, ICE, Args, FT->getCallResultType(*Context),
+ VK_RValue, EndLoc, FPOptionsOverride());
return Exp;
}
@@ -2614,8 +2616,9 @@ CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavo
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
const auto *FT = msgSendType->castAs<FunctionType>();
- CallExpr *STCE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, SourceLocation());
+ CallExpr *STCE =
+ CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue,
+ SourceLocation(), FPOptionsOverride());
return STCE;
}
@@ -2707,8 +2710,9 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
VK_LValue, SourceLocation());
- SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
- VK_LValue, SourceLocation());
+ SuperRep =
+ CallExpr::Create(*Context, DRE, InitExprs, superType, VK_LValue,
+ SourceLocation(), FPOptionsOverride());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -2802,8 +2806,9 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
VK_LValue, SourceLocation());
- SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
- VK_LValue, SourceLocation());
+ SuperRep =
+ CallExpr::Create(*Context, DRE, InitExprs, superType, VK_LValue,
+ SourceLocation(), FPOptionsOverride());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -2968,7 +2973,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
const auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc);
+ VK_RValue, EndLoc, FPOptionsOverride());
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -3817,8 +3822,9 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
- CallExpr *CE = CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(),
- VK_RValue, SourceLocation());
+ CallExpr *CE =
+ CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(), VK_RValue,
+ SourceLocation(), FPOptionsOverride());
return CE;
}
@@ -4530,7 +4536,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
InitExprs.push_back(FlagExp);
}
NewRep = CallExpr::Create(*Context, DRE, InitExprs, FType, VK_LValue,
- SourceLocation());
+ SourceLocation(), FPOptionsOverride());
NewRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), NewRep, UO_AddrOf,
Context->getPointerType(NewRep->getType()), VK_RValue, OK_Ordinary,
@@ -5279,9 +5285,8 @@ void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDe
}
// Build _objc_ivar_list metadata for classes ivars if needed
- unsigned NumIvars = !IDecl->ivar_empty()
- ? IDecl->ivar_size()
- : (CDecl ? CDecl->ivar_size() : 0);
+ unsigned NumIvars =
+ !IDecl->ivar_empty() ? IDecl->ivar_size() : CDecl->ivar_size();
if (NumIvars > 0) {
static bool objc_ivar = false;
if (!objc_ivar) {
diff --git a/clang/lib/Frontend/TextDiagnostic.cpp b/clang/lib/Frontend/TextDiagnostic.cpp
index 78acaaf9f96e..e781fd2c0229 100644
--- a/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/clang/lib/Frontend/TextDiagnostic.cpp
@@ -827,7 +827,10 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
emitFilename(PLoc.getFilename(), Loc.getManager());
switch (DiagOpts->getFormat()) {
- case DiagnosticOptions::Clang: OS << ':' << LineNo; break;
+ case DiagnosticOptions::Clang:
+ if (DiagOpts->ShowLine)
+ OS << ':' << LineNo;
+ break;
case DiagnosticOptions::MSVC: OS << '(' << LineNo; break;
case DiagnosticOptions::Vi: OS << " +" << LineNo; break;
}
diff --git a/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 0c0a44a1388b..9feb3c64039f 100644
--- a/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -135,10 +135,10 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
if (!Info.getLocation().isValid()) {
TextDiagnostic::printDiagnosticLevel(OS, Level, DiagOpts->ShowColors,
DiagOpts->CLFallbackMode);
- TextDiagnostic::printDiagnosticMessage(OS, Level, DiagMessageStream.str(),
- OS.tell() - StartOfLocationInfo,
- DiagOpts->MessageLength,
- DiagOpts->ShowColors);
+ TextDiagnostic::printDiagnosticMessage(
+ OS, /*IsSupplemental=*/Level == DiagnosticsEngine::Note,
+ DiagMessageStream.str(), OS.tell() - StartOfLocationInfo,
+ DiagOpts->MessageLength, DiagOpts->ShowColors);
OS.flush();
return;
}
diff --git a/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 56e05242f7c9..0503ae46a15f 100644
--- a/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -554,15 +554,15 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
continue;
}
- const FileEntry *FE = &File->getFileEntry();
- if (SM.translateFile(FE).isInvalid())
- SM.createFileID(FE, Pos, SrcMgr::C_User);
+ FileID FID = SM.translateFile(*File);
+ if (FID.isInvalid())
+ FID = SM.createFileID(*File, Pos, SrcMgr::C_User);
if (PH.Next(Line) && Line > 0)
- ExpectedLoc = SM.translateFileLineCol(FE, Line, 1);
+ ExpectedLoc = SM.translateLineCol(FID, Line, 1);
else if (PH.Next("*")) {
MatchAnyLine = true;
- ExpectedLoc = SM.translateFileLineCol(FE, 1, 1);
+ ExpectedLoc = SM.translateLineCol(FID, 1, 1);
}
}
} else if (PH.Next("*")) {
@@ -827,7 +827,7 @@ static bool findDirectives(SourceManager &SM, FileID FID,
return false;
// Create a lexer to lex all the tokens of the main file in raw mode.
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(FID);
Lexer RawLex(FID, FromFile, SM, LangOpts);
// Return comments as tokens; this is how we find expected diagnostics.
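
The verify parser above now resolves expected-diagnostic files through a FileID: translate the FileEntryRef to an existing FileID, create one if the file has not been loaded yet, then resolve line and column against that FileID. A sketch of the get-or-create step, with SM, File, and Pos as in the hunk:

    #include "clang/Basic/FileEntry.h"
    #include "clang/Basic/SourceManager.h"

    static clang::SourceLocation
    lineOneOf(clang::SourceManager &SM, clang::FileEntryRef File,
              clang::SourceLocation Pos) {
      clang::FileID FID = SM.translateFile(File);
      if (FID.isInvalid()) // not loaded yet: make a FileID for it
        FID = SM.createFileID(File, Pos, clang::SrcMgr::C_User);
      return SM.translateLineCol(FID, /*Line=*/1, /*Col=*/1);
    }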
diff --git a/clang/lib/Headers/__clang_cuda_builtin_vars.h b/clang/lib/Headers/__clang_cuda_builtin_vars.h
index 2ba1521f2580..412e823a827f 100644
--- a/clang/lib/Headers/__clang_cuda_builtin_vars.h
+++ b/clang/lib/Headers/__clang_cuda_builtin_vars.h
@@ -55,7 +55,9 @@ struct __cuda_builtin_threadIdx_t {
__CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z());
// threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a
// uint3). This function is defined after we pull in vector_types.h.
+ __attribute__((device)) operator dim3() const;
__attribute__((device)) operator uint3() const;
+
private:
__CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
};
@@ -66,7 +68,9 @@ struct __cuda_builtin_blockIdx_t {
__CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z());
// blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a
// uint3). This function is defined after we pull in vector_types.h.
+ __attribute__((device)) operator dim3() const;
__attribute__((device)) operator uint3() const;
+
private:
__CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
};
@@ -78,6 +82,8 @@ struct __cuda_builtin_blockDim_t {
// blockDim should be convertible to dim3 (in fact in nvcc, it *is* a
// dim3). This function is defined after we pull in vector_types.h.
__attribute__((device)) operator dim3() const;
+ __attribute__((device)) operator uint3() const;
+
private:
__CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
};
@@ -89,6 +95,8 @@ struct __cuda_builtin_gridDim_t {
// gridDim should be convertible to dim3 (in fact in nvcc, it *is* a
// dim3). This function is defined after we pull in vector_types.h.
__attribute__((device)) operator dim3() const;
+ __attribute__((device)) operator uint3() const;
+
private:
__CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
};
@@ -108,5 +116,6 @@ __attribute__((device)) const int warpSize = 32;
#undef __CUDA_DEVICE_BUILTIN
#undef __CUDA_BUILTIN_VAR
#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
+#undef __DELETE
#endif /* __CUDA_BUILTIN_VARS_H */
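
With the added operators, every builtin variable now converts to both uint3 and dim3, matching nvcc, where user code freely mixes the two. A minimal usage sketch (the kernel itself is illustrative):

// Illustrative kernel: all four conversions now compile, as under nvcc.
__global__ void copy_ids(uint3 *tid_out, dim3 *dim_out) {
  uint3 t = threadIdx; // operator uint3(), already present
  dim3 d = threadIdx;  // operator dim3(), newly added
  dim3 g = gridDim;    // dim3 conversion, already present
  uint3 b = blockDim;  // operator uint3(), newly added
  *tid_out = t;
  *dim_out = d;
  (void)g; (void)b;
}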
diff --git a/clang/lib/Headers/__clang_cuda_cmath.h b/clang/lib/Headers/__clang_cuda_cmath.h
index 8ba182689a4f..5bbb59a93b9e 100644
--- a/clang/lib/Headers/__clang_cuda_cmath.h
+++ b/clang/lib/Headers/__clang_cuda_cmath.h
@@ -66,10 +66,38 @@ __DEVICE__ float frexp(float __arg, int *__exp) {
}
// For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows. For OpenMP we omit these as some old system headers have
-// non-conforming `isinf(float)` and `isnan(float)` implementations that return
-// an `int`. The system versions of these functions should be fine anyway.
-#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
+// Windows.
+#if !defined(_MSC_VER) || defined(__OPENMP_NVPTX__)
+
+// For OpenMP we work around some old system headers that have non-conforming
+// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do
+// this by providing two versions of these functions, differing only in the
+// return type. To avoid conflicting definitions we disable implicit base
+// function generation. That means we will end up with two specializations, one
+// per type, but only one has a base function defined by the system header.
+#if defined(__OPENMP_NVPTX__)
+#pragma omp begin declare variant match( \
+ implementation = {extension(disable_implicit_base)})
+
+// FIXME: We lack an extension to customize the mangling of the variants, e.g.,
+// add a suffix. This means we would clash with the names of the variants
+// (note that we do not create implicit base functions here). To avoid
+// this clash we add a new trait to some of them that is always true
+// (this is LLVM after all ;)). It will only influence the mangled name
+// of the variants inside the inner region and avoid the clash.
+#pragma omp begin declare variant match(implementation = {vendor(llvm)})
+
+__DEVICE__ int isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ int isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ int isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ int isfinite(double __x) { return ::__isfinited(__x); }
+__DEVICE__ int isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ int isnan(double __x) { return ::__isnan(__x); }
+
+#pragma omp end declare variant
+
+#endif
+
__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
@@ -79,6 +107,11 @@ __DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }
__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+
+#if defined(__OPENMP_NVPTX__)
+#pragma omp end declare variant
+#endif
+
#endif
__DEVICE__ bool isgreater(float __x, float __y) {
@@ -142,6 +175,15 @@ __DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }
__DEVICE__ float tan(float __x) { return ::tanf(__x); }
__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
+// There was a redefinition error for this overload in CUDA mode.
+// We restrict it to OpenMP mode for now, as that is where it is actually
+// needed anyway.
+#ifdef __OPENMP_NVPTX__
+__DEVICE__ float remquo(float __n, float __d, int *__q) {
+ return ::remquof(__n, __d, __q);
+}
+#endif
+
// Notably missing above is nexttoward. We omit it because
// libdevice doesn't provide an implementation, and we don't want to be in the
// business of implementing tricky libm functions in this header.
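
The comments in this hunk compress a subtle trick, so a toy sketch may help: under `disable_implicit_base`, a variant only attaches to a base function that already exists, so the int-returning definitions take effect solely where an old system header declared `int isnan(float)` and friends; the nested `vendor(llvm)` region then only perturbs the variants' mangled names. A sketch with a toy function `f`, meaningful only under OpenMP NVPTX offloading:

// Toy sketch of the disable_implicit_base mechanism (not real header code).
#pragma omp begin declare variant match( \
    implementation = {extension(disable_implicit_base)})
// Attaches only to a pre-existing base `int f(float)`; with no such base
// this definition is dropped instead of becoming one.
int f(float x) { return x != x; }
#pragma omp end declare variant

// The conforming overload is defined unconditionally.
bool f(float x) { return x != x; }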
diff --git a/clang/lib/Headers/__clang_cuda_complex_builtins.h b/clang/lib/Headers/__clang_cuda_complex_builtins.h
index 8c10ff6b461f..2b701fef0ea2 100644
--- a/clang/lib/Headers/__clang_cuda_complex_builtins.h
+++ b/clang/lib/Headers/__clang_cuda_complex_builtins.h
@@ -16,7 +16,7 @@
// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
+#ifdef __OPENMP_NVPTX__
#pragma omp declare target
#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
#else
@@ -26,7 +26,7 @@
// To make the algorithms available for C and C++ in CUDA and OpenMP we select
// different but equivalent function versions. TODO: For OpenMP we currently
// select the native builtins as the overload support for templates is lacking.
-#if !defined(_OPENMP)
+#if !defined(__OPENMP_NVPTX__)
#define _ISNANd std::isnan
#define _ISNANf std::isnan
#define _ISINFd std::isinf
@@ -41,6 +41,27 @@
#define _ABSf std::abs
#define _LOGBd std::logb
#define _LOGBf std::logb
+// Rather than pulling in std::max from <algorithm> every time, use ::max.
+#define _fmaxd max
+#define _fmaxf max
+#else
+#ifdef __AMDGCN__
+#define _ISNANd __ocml_isnan_f64
+#define _ISNANf __ocml_isnan_f32
+#define _ISINFd __ocml_isinf_f64
+#define _ISINFf __ocml_isinf_f32
+#define _ISFINITEd __ocml_isfinite_f64
+#define _ISFINITEf __ocml_isfinite_f32
+#define _COPYSIGNd __ocml_copysign_f64
+#define _COPYSIGNf __ocml_copysign_f32
+#define _SCALBNd __ocml_scalbn_f64
+#define _SCALBNf __ocml_scalbn_f32
+#define _ABSd __ocml_fabs_f64
+#define _ABSf __ocml_fabs_f32
+#define _LOGBd __ocml_logb_f64
+#define _LOGBf __ocml_logb_f32
+#define _fmaxd __ocml_fmax_f64
+#define _fmaxf __ocml_fmax_f32
#else
#define _ISNANd __nv_isnand
#define _ISNANf __nv_isnanf
@@ -56,6 +77,9 @@
#define _ABSf __nv_fabsf
#define _LOGBd __nv_logb
#define _LOGBf __nv_logbf
+#define _fmaxd __nv_fmax
+#define _fmaxf __nv_fmaxf
+#endif
#endif
#if defined(__cplusplus)
@@ -167,7 +191,7 @@ __DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
// Can't use std::max, because that's defined in <algorithm>, and we don't
// want to pull that in for every compile. The CUDA headers define
// ::max(float, float) and ::max(double, double), which is sufficient for us.
- double __logbw = _LOGBd(max(_ABSd(__c), _ABSd(__d)));
+ double __logbw = _LOGBd(_fmaxd(_ABSd(__c), _ABSd(__d)));
if (_ISFINITEd(__logbw)) {
__ilogbw = (int)__logbw;
__c = _SCALBNd(__c, -__ilogbw);
@@ -200,7 +224,7 @@ __DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
int __ilogbw = 0;
- float __logbw = _LOGBf(max(_ABSf(__c), _ABSf(__d)));
+ float __logbw = _LOGBf(_fmaxf(_ABSf(__c), _ABSf(__d)));
if (_ISFINITEf(__logbw)) {
__ilogbw = (int)__logbw;
__c = _SCALBNf(__c, -__ilogbw);
@@ -249,8 +273,10 @@ __DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
#undef _ABSf
#undef _LOGBd
#undef _LOGBf
+#undef _fmaxd
+#undef _fmaxf
-#ifdef _OPENMP
+#ifdef __OPENMP_NVPTX__
#pragma omp end declare target
#endif
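
The new `_fmaxd`/`_fmaxf` macros let `__divdc3`/`__divsc3` keep their Smith-style scaling without `<algorithm>`: the divisor is rescaled by a power of two so `c*c + d*d` stays in range, and the scale is folded back into the quotient. A host-side sketch of the same range reduction using standard `<cmath>` names, reduced to the real part of (a + 0i) / (c + di):

#include <cmath>

// Host-side sketch of the scaling used in __divdc3. Scaling c and d by
// 2^-k keeps c*c + d*d from overflowing or underflowing; std::scalbn
// folds 2^-k back into the quotient exactly.
double scaled_real_quotient(double a, double c, double d) {
  double logbw = std::logb(std::fmax(std::fabs(c), std::fabs(d)));
  int k = std::isfinite(logbw) ? static_cast<int>(logbw) : 0;
  c = std::scalbn(c, -k);
  d = std::scalbn(d, -k);
  double denom = c * c + d * d;
  return std::scalbn(a * c / denom, -k);
}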
diff --git a/clang/lib/Headers/__clang_cuda_math.h b/clang/lib/Headers/__clang_cuda_math.h
index 332e616702ac..acb26ad345d5 100644
--- a/clang/lib/Headers/__clang_cuda_math.h
+++ b/clang/lib/Headers/__clang_cuda_math.h
@@ -195,8 +195,8 @@ __DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nearbyint(double __a) { return __builtin_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __builtin_nearbyintf(__a); }
__DEVICE__ double nextafter(double __a, double __b) {
return __nv_nextafter(__a, __b);
}
@@ -249,8 +249,9 @@ __DEVICE__ double rhypot(double __a, double __b) {
__DEVICE__ float rhypotf(float __a, float __b) {
return __nv_rhypotf(__a, __b);
}
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+// __nv_rint* in libdevice is buggy and produces incorrect results.
+__DEVICE__ double rint(double __a) { return __builtin_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __builtin_rintf(__a); }
__DEVICE__ double rnorm(int __a, const double *__b) {
return __nv_rnorm(__a, __b);
}
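
For context on the `__nv_rint` workaround: `rint` and `nearbyint` round to nearest with ties going to even under the default rounding mode, which is the behavior the builtins provide. A host-side illustration (output assumes the default FE_TONEAREST mode):

#include <cmath>
#include <cstdio>

// Round-to-nearest-even, the semantics the switch to __builtin_rint and
// __builtin_nearbyint preserves on the device.
int main() {
  std::printf("%.1f %.1f\n", std::rint(2.5), std::rint(3.5));         // 2.0 4.0
  std::printf("%.1f %.1f\n", std::nearbyint(0.5), std::nearbyint(1.5)); // 0.0 2.0
  return 0;
}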
diff --git a/clang/lib/Headers/__clang_cuda_math_forward_declares.h b/clang/lib/Headers/__clang_cuda_math_forward_declares.h
index 8a270859e4a5..c0f1f47cc993 100644
--- a/clang/lib/Headers/__clang_cuda_math_forward_declares.h
+++ b/clang/lib/Headers/__clang_cuda_math_forward_declares.h
@@ -160,6 +160,9 @@ __DEVICE__ double scalbln(double, long);
__DEVICE__ float scalbln(float, long);
__DEVICE__ double scalbn(double, int);
__DEVICE__ float scalbn(float, int);
+#ifdef _MSC_VER
+__DEVICE__ bool signbit(long double);
+#endif
__DEVICE__ bool signbit(double);
__DEVICE__ bool signbit(float);
__DEVICE__ double sin(double);
diff --git a/clang/lib/Headers/__clang_cuda_runtime_wrapper.h b/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
index f43ed55de489..f88c39a9b6e5 100644
--- a/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -377,30 +377,38 @@ __device__ static inline void *malloc(size_t __size) {
// Out-of-line implementations from __clang_cuda_builtin_vars.h. These need to
// come after we've pulled in the definition of uint3 and dim3.
+__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
+ return dim3(x, y, z);
+}
+
__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
- uint3 ret;
- ret.x = x;
- ret.y = y;
- ret.z = z;
- return ret;
+ return {x, y, z};
+}
+
+__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
+ return dim3(x, y, z);
}
__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
- uint3 ret;
- ret.x = x;
- ret.y = y;
- ret.z = z;
- return ret;
+ return {x, y, z};
}
__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
return dim3(x, y, z);
}
+__device__ inline __cuda_builtin_blockDim_t::operator uint3() const {
+ return {x, y, z};
+}
+
__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
return dim3(x, y, z);
}
+__device__ inline __cuda_builtin_gridDim_t::operator uint3() const {
+ return {x, y, z};
+}
+
#include <__clang_cuda_cmath.h>
#include <__clang_cuda_intrinsics.h>
#include <__clang_cuda_complex_builtins.h>
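
These definitions complete operators that __clang_cuda_builtin_vars.h only declared; C++ permits declaring a conversion to a type that is still incomplete, as long as the definition comes after the type does. A stripped-down sketch of the pattern with illustrative names:

// Stripped-down sketch of the declare-early/define-late pattern.
struct uint3_like; // a forward declaration suffices for the declaration below

struct thread_idx_like {
  unsigned x, y, z;
  operator uint3_like() const; // declared while uint3_like is incomplete
};

struct uint3_like { unsigned x, y, z; };

// Defined only once the target type is complete, as the wrapper does after
// pulling in vector_types.h.
inline thread_idx_like::operator uint3_like() const { return {x, y, z}; }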
diff --git a/clang/lib/Headers/__clang_hip_cmath.h b/clang/lib/Headers/__clang_hip_cmath.h
new file mode 100644
index 000000000000..cd22a2df954b
--- /dev/null
+++ b/clang/lib/Headers/__clang_hip_cmath.h
@@ -0,0 +1,664 @@
+/*===---- __clang_hip_cmath.h - HIP cmath decls -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_CMATH_H__
+#define __CLANG_HIP_CMATH_H__
+
+#if !defined(__HIP__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if defined(__cplusplus)
+#include <limits>
+#include <type_traits>
+#include <utility>
+#endif
+#include <limits.h>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+
+// Start with functions that cannot be defined by DEF macros below.
+#if defined(__cplusplus)
+__DEVICE__ double abs(double __x) { return ::fabs(__x); }
+__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
+__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
+__DEVICE__ long abs(long __n) { return ::labs(__n); }
+__DEVICE__ float fma(float __x, float __y, float __z) {
+ return ::fmaf(__x, __y, __z);
+}
+__DEVICE__ int fpclassify(float __x) {
+ return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+ FP_ZERO, __x);
+}
+__DEVICE__ int fpclassify(double __x) {
+ return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+ FP_ZERO, __x);
+}
+__DEVICE__ float frexp(float __arg, int *__exp) {
+ return ::frexpf(__arg, __exp);
+}
+__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ bool isfinite(double __x) { return ::__finite(__x); }
+__DEVICE__ bool isgreater(float __x, float __y) {
+ return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreater(double __x, double __y) {
+ return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(float __x, float __y) {
+ return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(double __x, double __y) {
+ return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ bool isless(float __x, float __y) {
+ return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool isless(double __x, double __y) {
+ return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool islessequal(float __x, float __y) {
+ return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessequal(double __x, double __y) {
+ return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessgreater(float __x, float __y) {
+ return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool islessgreater(double __x, double __y) {
+ return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isunordered(float __x, float __y) {
+ return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ bool isunordered(double __x, double __y) {
+ return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
+__DEVICE__ float pow(float __base, int __iexp) {
+ return ::powif(__base, __iexp);
+}
+__DEVICE__ double pow(double __base, int __iexp) {
+ return ::powi(__base, __iexp);
+}
+__DEVICE__ float remquo(float __x, float __y, int *__quo) {
+ return ::remquof(__x, __y, __quo);
+}
+__DEVICE__ float scalbln(float __x, long int __n) {
+ return ::scalblnf(__x, __n);
+}
+__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
+__DEVICE__ bool signbit(double __x) { return ::__signbit(__x); }
+
+// Notably missing above is nexttoward. We omit it because
+// ocml doesn't provide an implementation, and we don't want to be in the
+// business of implementing tricky libm functions in this header.
+
+// Other functions.
+__DEVICE__ _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
+ return __ocml_fma_f16(__x, __y, __z);
+}
+__DEVICE__ _Float16 pow(_Float16 __base, int __iexp) {
+ return __ocml_pown_f16(__base, __iexp);
+}
+
+// BEGIN DEF_FUN and HIP_OVERLOAD
+
+// BEGIN DEF_FUN
+
+#pragma push_macro("__DEF_FUN1")
+#pragma push_macro("__DEF_FUN2")
+#pragma push_macro("__DEF_FUN2_FI")
+
+// Define cmath functions with float argument and returns __retty.
+#define __DEF_FUN1(__retty, __func) \
+ __DEVICE__ \
+ __retty __func(float __x) { return __func##f(__x); }
+
+// Define cmath functions with two float arguments and returns __retty.
+#define __DEF_FUN2(__retty, __func) \
+ __DEVICE__ \
+ __retty __func(float __x, float __y) { return __func##f(__x, __y); }
+
+// Define cmath functions with a float and an int argument and returns __retty.
+#define __DEF_FUN2_FI(__retty, __func) \
+ __DEVICE__ \
+ __retty __func(float __x, int __y) { return __func##f(__x, __y); }
+
+__DEF_FUN1(float, acos)
+__DEF_FUN1(float, acosh)
+__DEF_FUN1(float, asin)
+__DEF_FUN1(float, asinh)
+__DEF_FUN1(float, atan)
+__DEF_FUN2(float, atan2)
+__DEF_FUN1(float, atanh)
+__DEF_FUN1(float, cbrt)
+__DEF_FUN1(float, ceil)
+__DEF_FUN2(float, copysign)
+__DEF_FUN1(float, cos)
+__DEF_FUN1(float, cosh)
+__DEF_FUN1(float, erf)
+__DEF_FUN1(float, erfc)
+__DEF_FUN1(float, exp)
+__DEF_FUN1(float, exp2)
+__DEF_FUN1(float, expm1)
+__DEF_FUN1(float, fabs)
+__DEF_FUN2(float, fdim)
+__DEF_FUN1(float, floor)
+__DEF_FUN2(float, fmax)
+__DEF_FUN2(float, fmin)
+__DEF_FUN2(float, fmod)
+__DEF_FUN2(float, hypot)
+__DEF_FUN1(int, ilogb)
+__DEF_FUN2_FI(float, ldexp)
+__DEF_FUN1(float, lgamma)
+__DEF_FUN1(float, log)
+__DEF_FUN1(float, log10)
+__DEF_FUN1(float, log1p)
+__DEF_FUN1(float, log2)
+__DEF_FUN1(float, logb)
+__DEF_FUN1(long long, llrint)
+__DEF_FUN1(long long, llround)
+__DEF_FUN1(long, lrint)
+__DEF_FUN1(long, lround)
+__DEF_FUN1(float, nearbyint)
+__DEF_FUN2(float, nextafter)
+__DEF_FUN2(float, pow)
+__DEF_FUN2(float, remainder)
+__DEF_FUN1(float, rint)
+__DEF_FUN1(float, round)
+__DEF_FUN2_FI(float, scalbn)
+__DEF_FUN1(float, sin)
+__DEF_FUN1(float, sinh)
+__DEF_FUN1(float, sqrt)
+__DEF_FUN1(float, tan)
+__DEF_FUN1(float, tanh)
+__DEF_FUN1(float, tgamma)
+__DEF_FUN1(float, trunc)
+
+#pragma pop_macro("__DEF_FUN1")
+#pragma pop_macro("__DEF_FUN2")
+#pragma pop_macro("__DEF_FUN2_FI")
+
+// END DEF_FUN
+
+// BEGIN HIP_OVERLOAD
+
+#pragma push_macro("__HIP_OVERLOAD1")
+#pragma push_macro("__HIP_OVERLOAD2")
+
+// __hip_enable_if::type is a type function which returns __T if __B is true.
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
+
+// decltype is only available in C++11 and above.
+#if __cplusplus >= 201103L
+// __hip_promote
+namespace __hip {
+
+template <class _Tp> struct __numeric_type {
+ static void __test(...);
+ static _Float16 __test(_Float16);
+ static float __test(float);
+ static double __test(char);
+ static double __test(int);
+ static double __test(unsigned);
+ static double __test(long);
+ static double __test(unsigned long);
+ static double __test(long long);
+ static double __test(unsigned long long);
+ static double __test(double);
+ // No support for long double, use double instead.
+ static double __test(long double);
+
+ typedef decltype(__test(std::declval<_Tp>())) type;
+ static const bool value = !std::is_same<type, void>::value;
+};
+
+template <> struct __numeric_type<void> { static const bool value = true; };
+
+template <class _A1, class _A2 = void, class _A3 = void,
+ bool = __numeric_type<_A1>::value &&__numeric_type<_A2>::value
+ &&__numeric_type<_A3>::value>
+class __promote_imp {
+public:
+ static const bool value = false;
+};
+
+template <class _A1, class _A2, class _A3>
+class __promote_imp<_A1, _A2, _A3, true> {
+private:
+ typedef typename __promote_imp<_A1>::type __type1;
+ typedef typename __promote_imp<_A2>::type __type2;
+ typedef typename __promote_imp<_A3>::type __type3;
+
+public:
+ typedef decltype(__type1() + __type2() + __type3()) type;
+ static const bool value = true;
+};
+
+template <class _A1, class _A2> class __promote_imp<_A1, _A2, void, true> {
+private:
+ typedef typename __promote_imp<_A1>::type __type1;
+ typedef typename __promote_imp<_A2>::type __type2;
+
+public:
+ typedef decltype(__type1() + __type2()) type;
+ static const bool value = true;
+};
+
+template <class _A1> class __promote_imp<_A1, void, void, true> {
+public:
+ typedef typename __numeric_type<_A1>::type type;
+ static const bool value = true;
+};
+
+template <class _A1, class _A2 = void, class _A3 = void>
+class __promote : public __promote_imp<_A1, _A2, _A3> {};
+
+} // namespace __hip
+#endif //__cplusplus >= 201103L
+
+// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
+// to avoid a compilation error due to ambiguity, e.g. floor(5) is resolved
+// with floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn) \
+ template <typename __T> \
+ __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, \
+ __retty>::type \
+ __fn(__T __x) { \
+ return ::__fn((double)__x); \
+ }
+
+// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
+// or integer arguments to avoid a compilation error due to ambiguity, e.g.
+// max(5.0f, 6.0) is resolved with max(double, double).
+#if __cplusplus >= 201103L
+#define __HIP_OVERLOAD2(__retty, __fn) \
+ template <typename __T1, typename __T2> \
+ __DEVICE__ typename __hip_enable_if< \
+ std::numeric_limits<__T1>::is_specialized && \
+ std::numeric_limits<__T2>::is_specialized, \
+ typename __hip::__promote<__T1, __T2>::type>::type \
+ __fn(__T1 __x, __T2 __y) { \
+ typedef typename __hip::__promote<__T1, __T2>::type __result_type; \
+ return __fn((__result_type)__x, (__result_type)__y); \
+ }
+#else
+#define __HIP_OVERLOAD2(__retty, __fn) \
+ template <typename __T1, typename __T2> \
+ __DEVICE__ \
+ typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized && \
+ std::numeric_limits<__T2>::is_specialized, \
+ __retty>::type \
+ __fn(__T1 __x, __T2 __y) { \
+ return __fn((double)__x, (double)__y); \
+ }
+#endif
+
+__HIP_OVERLOAD1(double, abs)
+__HIP_OVERLOAD1(double, acos)
+__HIP_OVERLOAD1(double, acosh)
+__HIP_OVERLOAD1(double, asin)
+__HIP_OVERLOAD1(double, asinh)
+__HIP_OVERLOAD1(double, atan)
+__HIP_OVERLOAD2(double, atan2)
+__HIP_OVERLOAD1(double, atanh)
+__HIP_OVERLOAD1(double, cbrt)
+__HIP_OVERLOAD1(double, ceil)
+__HIP_OVERLOAD2(double, copysign)
+__HIP_OVERLOAD1(double, cos)
+__HIP_OVERLOAD1(double, cosh)
+__HIP_OVERLOAD1(double, erf)
+__HIP_OVERLOAD1(double, erfc)
+__HIP_OVERLOAD1(double, exp)
+__HIP_OVERLOAD1(double, exp2)
+__HIP_OVERLOAD1(double, expm1)
+__HIP_OVERLOAD1(double, fabs)
+__HIP_OVERLOAD2(double, fdim)
+__HIP_OVERLOAD1(double, floor)
+__HIP_OVERLOAD2(double, fmax)
+__HIP_OVERLOAD2(double, fmin)
+__HIP_OVERLOAD2(double, fmod)
+__HIP_OVERLOAD1(int, fpclassify)
+__HIP_OVERLOAD2(double, hypot)
+__HIP_OVERLOAD1(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater)
+__HIP_OVERLOAD2(bool, isgreaterequal)
+__HIP_OVERLOAD1(bool, isinf)
+__HIP_OVERLOAD2(bool, isless)
+__HIP_OVERLOAD2(bool, islessequal)
+__HIP_OVERLOAD2(bool, islessgreater)
+__HIP_OVERLOAD1(bool, isnan)
+__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered)
+__HIP_OVERLOAD1(double, lgamma)
+__HIP_OVERLOAD1(double, log)
+__HIP_OVERLOAD1(double, log10)
+__HIP_OVERLOAD1(double, log1p)
+__HIP_OVERLOAD1(double, log2)
+__HIP_OVERLOAD1(double, logb)
+__HIP_OVERLOAD1(long long, llrint)
+__HIP_OVERLOAD1(long long, llround)
+__HIP_OVERLOAD1(long, lrint)
+__HIP_OVERLOAD1(long, lround)
+__HIP_OVERLOAD1(double, nearbyint)
+__HIP_OVERLOAD2(double, nextafter)
+__HIP_OVERLOAD2(double, pow)
+__HIP_OVERLOAD2(double, remainder)
+__HIP_OVERLOAD1(double, rint)
+__HIP_OVERLOAD1(double, round)
+__HIP_OVERLOAD1(bool, signbit)
+__HIP_OVERLOAD1(double, sin)
+__HIP_OVERLOAD1(double, sinh)
+__HIP_OVERLOAD1(double, sqrt)
+__HIP_OVERLOAD1(double, tan)
+__HIP_OVERLOAD1(double, tanh)
+__HIP_OVERLOAD1(double, tgamma)
+__HIP_OVERLOAD1(double, trunc)
+
+// Overload these but don't add them to std, they are not part of cmath.
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+// Additional overloads that don't quite match HIP_OVERLOAD.
+#if __cplusplus >= 201103L
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__ typename __hip_enable_if<
+ std::numeric_limits<__T1>::is_specialized &&
+ std::numeric_limits<__T2>::is_specialized &&
+ std::numeric_limits<__T3>::is_specialized,
+ typename __hip::__promote<__T1, __T2, __T3>::type>::type
+fma(__T1 __x, __T2 __y, __T3 __z) {
+ typedef typename __hip::__promote<__T1, __T2, __T3>::type __result_type;
+ return ::fma((__result_type)__x, (__result_type)__y, (__result_type)__z);
+}
+#else
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
+ std::numeric_limits<__T2>::is_specialized &&
+ std::numeric_limits<__T3>::is_specialized,
+ double>::type
+ fma(__T1 __x, __T2 __y, __T3 __z) {
+ return ::fma((double)__x, (double)__y, (double)__z);
+}
+#endif
+
+template <typename __T>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ frexp(__T __x, int *__exp) {
+ return ::frexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ ldexp(__T __x, int __exp) {
+ return ::ldexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ modf(__T __x, double *__exp) {
+ return ::modf((double)__x, __exp);
+}
+
+#if __cplusplus >= 201103L
+template <typename __T1, typename __T2>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
+ std::numeric_limits<__T2>::is_specialized,
+ typename __hip::__promote<__T1, __T2>::type>::type
+ remquo(__T1 __x, __T2 __y, int *__quo) {
+ typedef typename __hip::__promote<__T1, __T2>::type __result_type;
+ return ::remquo((__result_type)__x, (__result_type)__y, __quo);
+}
+#else
+template <typename __T1, typename __T2>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
+ std::numeric_limits<__T2>::is_specialized,
+ double>::type
+ remquo(__T1 __x, __T2 __y, int *__quo) {
+ return ::remquo((double)__x, (double)__y, __quo);
+}
+#endif
+
+template <typename __T>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ scalbln(__T __x, long int __exp) {
+ return ::scalbln((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ scalbn(__T __x, int __exp) {
+ return ::scalbn((double)__x, __exp);
+}
+
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+
+// END HIP_OVERLOAD
+
+// END DEF_FUN and HIP_OVERLOAD
+
+#endif // defined(__cplusplus)
+
+// Define these overloads inside the namespace our standard library uses.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+// Pull the new overloads we defined above into namespace std.
+// using ::abs; - This may be considered for C++.
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnormal;
+using ::isunordered;
+using ::ldexp;
+using ::lgamma;
+using ::llrint;
+using ::llround;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::modf;
+// using ::nan; - This may be considered for C++.
+// using ::nanf; - This may be considered for C++.
+// using ::nanl; - This is not yet defined.
+using ::nearbyint;
+using ::nextafter;
+// using ::nexttoward; - Omit this since we do not have a definition.
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+
+// Well this is fun: We need to pull these symbols in for libc++, but we can't
+// pull them in with libstdc++, because its ::isinf and ::isnan are different
+// from its std::isinf and std::isnan.
+#ifndef __GLIBCXX__
+using ::isinf;
+using ::isnan;
+#endif
+
+// Finally, pull the "foobarf" functions that HIP defines into std.
+using ::acosf;
+using ::acoshf;
+using ::asinf;
+using ::asinhf;
+using ::atan2f;
+using ::atanf;
+using ::atanhf;
+using ::cbrtf;
+using ::ceilf;
+using ::copysignf;
+using ::cosf;
+using ::coshf;
+using ::erfcf;
+using ::erff;
+using ::exp2f;
+using ::expf;
+using ::expm1f;
+using ::fabsf;
+using ::fdimf;
+using ::floorf;
+using ::fmaf;
+using ::fmaxf;
+using ::fminf;
+using ::fmodf;
+using ::frexpf;
+using ::hypotf;
+using ::ilogbf;
+using ::ldexpf;
+using ::lgammaf;
+using ::llrintf;
+using ::llroundf;
+using ::log10f;
+using ::log1pf;
+using ::log2f;
+using ::logbf;
+using ::logf;
+using ::lrintf;
+using ::lroundf;
+using ::modff;
+using ::nearbyintf;
+using ::nextafterf;
+// using ::nexttowardf; - Omit this since we do not have a definition.
+using ::powf;
+using ::remainderf;
+using ::remquof;
+using ::rintf;
+using ::roundf;
+using ::scalblnf;
+using ::scalbnf;
+using ::sinf;
+using ::sinhf;
+using ::sqrtf;
+using ::tanf;
+using ::tanhf;
+using ::tgammaf;
+using ::truncf;
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+} // namespace std
+#endif
+
+// Define device-side math functions from <ymath.h> on MSVC.
+#if defined(_MSC_VER)
+
+// Before VS2019, `<ymath.h>` was also included by `<limits>` and other
+// headers; from VS2019 it is only included by `<complex>`. We include
+// `<ymath.h>` here to ensure the C functions declared there won't be marked
+// as `__host__` and `__device__` through the `<complex>` wrapper.
+#include <ymath.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif // defined(__cplusplus)
+__DEVICE__ __attribute__((overloadable)) double _Cosh(double x, double y) {
+ return cosh(x) * y;
+}
+__DEVICE__ __attribute__((overloadable)) float _FCosh(float x, float y) {
+ return coshf(x) * y;
+}
+__DEVICE__ __attribute__((overloadable)) short _Dtest(double *p) {
+ return fpclassify(*p);
+}
+__DEVICE__ __attribute__((overloadable)) short _FDtest(float *p) {
+ return fpclassify(*p);
+}
+__DEVICE__ __attribute__((overloadable)) double _Sinh(double x, double y) {
+ return sinh(x) * y;
+}
+__DEVICE__ __attribute__((overloadable)) float _FSinh(float x, float y) {
+ return sinhf(x) * y;
+}
+#if defined(__cplusplus)
+}
+#endif // defined(__cplusplus)
+#endif // defined(_MSC_VER)
+
+#pragma pop_macro("__DEVICE__")
+
+#endif // __CLANG_HIP_CMATH_H__
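
The `__HIP_OVERLOAD1`/`__HIP_OVERLOAD2` machinery above exists so that integer and mixed-type calls resolve by promotion instead of failing overload resolution. A short sketch of what that buys in device code (the kernel is illustrative and assumes the HIP wrapper headers are in effect):

// Illustrative device code: each call would be ambiguous or ill-formed
// against only the float/double overloads, but resolves via the templates.
__global__ void overload_demo(double *out) {
  out[0] = floor(5);       // __HIP_OVERLOAD1: calls floor((double)5)
  out[1] = max(5.0f, 6.0); // __HIP_OVERLOAD2: promotes both to double
}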
diff --git a/clang/lib/Headers/__clang_hip_libdevice_declares.h b/clang/lib/Headers/__clang_hip_libdevice_declares.h
index e1cd49a39c65..ac98907ad5de 100644
--- a/clang/lib/Headers/__clang_hip_libdevice_declares.h
+++ b/clang/lib/Headers/__clang_hip_libdevice_declares.h
@@ -10,7 +10,9 @@
#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#ifdef __cplusplus
extern "C" {
+#endif
// BEGIN FLOAT
__device__ __attribute__((const)) float __ocml_acos_f32(float);
@@ -78,6 +80,7 @@ __device__ __attribute__((const)) float __ocml_len4_f32(float, float, float,
__device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
__device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
+__device__ __attribute__((pure)) float __ocml_pown_f32(float, int);
__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
__device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
__device__ float __ocml_remquo_f32(float, float,
@@ -126,10 +129,10 @@ __device__ __attribute__((const)) float __ocml_div_rte_f32(float, float);
__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float);
__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
@@ -205,6 +208,7 @@ __device__ __attribute__((const)) double __ocml_len4_f64(double, double, double,
__device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
__device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
+__device__ __attribute__((pure)) double __ocml_pown_f64(double, int);
__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
__device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
__device__ double __ocml_remquo_f64(double, double,
@@ -252,10 +256,10 @@ __device__ __attribute__((const)) double __ocml_div_rte_f64(double, double);
__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double);
__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
double);
__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
@@ -290,6 +294,7 @@ __device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
__device__ _Float16 __ocml_sin_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);
typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
typedef short __2i16 __attribute__((ext_vector_type(2)));
@@ -313,14 +318,17 @@ __device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
__device__ inline __2f16
__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
{
- return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
+ return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y));
}
__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
__device__ __2f16 __ocml_sin_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_pown_2f16(__2f16, __2i16);
+#ifdef __cplusplus
} // extern "C"
+#endif
#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
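
Two independent fixes land here: the `extern "C"` bracket is now compiled out in C mode, so the declarations can back C-language offloading, and the rounded `sqrt` declarations lose a spurious second parameter. A hedged HIP-side sketch of the corrected signature in use (the helper name is illustrative; linking still requires the ROCm device library):

// Illustrative device helper: __ocml_sqrt_rtp_f32 now takes exactly one
// float, matching the OCML implementation it declares.
__device__ float sqrt_round_up(float x) { return __ocml_sqrt_rtp_f32(x); }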
diff --git a/clang/lib/Headers/__clang_hip_math.h b/clang/lib/Headers/__clang_hip_math.h
index cf7014b9aefe..14d91c66b352 100644
--- a/clang/lib/Headers/__clang_hip_math.h
+++ b/clang/lib/Headers/__clang_hip_math.h
@@ -1,4 +1,4 @@
-/*===---- __clang_hip_math.h - HIP math decls -------------------------------===
+/*===---- __clang_hip_math.h - Device-side HIP math support ----------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -6,24 +6,57 @@
*
*===-----------------------------------------------------------------------===
*/
-
#ifndef __CLANG_HIP_MATH_H__
#define __CLANG_HIP_MATH_H__
+#if !defined(__HIP__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if defined(__cplusplus)
#include <algorithm>
+#endif
#include <limits.h>
-#include <limits>
#include <stdint.h>
#pragma push_macro("__DEVICE__")
-#pragma push_macro("__RETURN_TYPE")
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
-// to be consistent with __clang_cuda_math_forward_declares
-#define __DEVICE__ static __device__
+// A few functions return bool in C++ mode; in C mode they return int instead.
+#pragma push_macro("__RETURN_TYPE")
+#if defined(__cplusplus)
#define __RETURN_TYPE bool
+#else
+#define __RETURN_TYPE int
+#endif
+
+#if defined(__cplusplus) && __cplusplus < 201103L
+// Emulate static_assert on type sizes.
+template <bool>
+struct __compare_result {};
+template <>
+struct __compare_result<true> {
+  static const bool valid;
+};
+
+__DEVICE__
+void __suppress_unused_warning(bool b) {}
+template <unsigned int S, unsigned int T>
+__DEVICE__ void __static_assert_equal_size() {
+  __suppress_unused_warning(__compare_result<S == T>::valid);
+}
+
+#define __static_assert_type_size_equal(A, B) \
+  __static_assert_equal_size<A, B>()
+
+#else
+#define __static_assert_type_size_equal(A, B) \
+  static_assert((A) == (B), "")
+
+#endif
__DEVICE__
-inline uint64_t __make_mantissa_base8(const char *__tagp) {
+uint64_t __make_mantissa_base8(const char *__tagp) {
uint64_t __r = 0;
while (__tagp) {
char __tmp = *__tagp;
@@ -40,7 +73,7 @@ inline uint64_t __make_mantissa_base8(const char *__tagp) {
}
__DEVICE__
-inline uint64_t __make_mantissa_base10(const char *__tagp) {
+uint64_t __make_mantissa_base10(const char *__tagp) {
uint64_t __r = 0;
while (__tagp) {
char __tmp = *__tagp;
@@ -57,7 +90,7 @@ inline uint64_t __make_mantissa_base10(const char *__tagp) {
}
__DEVICE__
-inline uint64_t __make_mantissa_base16(const char *__tagp) {
+uint64_t __make_mantissa_base16(const char *__tagp) {
uint64_t __r = 0;
while (__tagp) {
char __tmp = *__tagp;
@@ -78,7 +111,7 @@ inline uint64_t __make_mantissa_base16(const char *__tagp) {
}
__DEVICE__
-inline uint64_t __make_mantissa(const char *__tagp) {
+uint64_t __make_mantissa(const char *__tagp) {
if (!__tagp)
return 0u;
@@ -95,78 +128,124 @@ inline uint64_t __make_mantissa(const char *__tagp) {
}
// BEGIN FLOAT
+#if defined(__cplusplus)
__DEVICE__
-inline float abs(float __x) { return __ocml_fabs_f32(__x); }
+int abs(int __x) {
+ int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
__DEVICE__
-inline float acosf(float __x) { return __ocml_acos_f32(__x); }
+long labs(long __x) {
+ long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
__DEVICE__
-inline float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+long long llabs(long long __x) {
+ long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+#endif
+
__DEVICE__
-inline float asinf(float __x) { return __ocml_asin_f32(__x); }
+float acosf(float __x) { return __ocml_acos_f32(__x); }
+
__DEVICE__
-inline float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+
__DEVICE__
-inline float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+float asinf(float __x) { return __ocml_asin_f32(__x); }
+
__DEVICE__
-inline float atanf(float __x) { return __ocml_atan_f32(__x); }
+float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+
__DEVICE__
-inline float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+
__DEVICE__
-inline float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+float atanf(float __x) { return __ocml_atan_f32(__x); }
+
__DEVICE__
-inline float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+
__DEVICE__
-inline float copysignf(float __x, float __y) {
- return __ocml_copysign_f32(__x, __y);
-}
+float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+
__DEVICE__
-inline float cosf(float __x) { return __ocml_cos_f32(__x); }
+float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+
__DEVICE__
-inline float coshf(float __x) { return __ocml_cosh_f32(__x); }
+float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
+
__DEVICE__
-inline float cospif(float __x) { return __ocml_cospi_f32(__x); }
+float cosf(float __x) { return __ocml_cos_f32(__x); }
+
__DEVICE__
-inline float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+float coshf(float __x) { return __ocml_cosh_f32(__x); }
+
__DEVICE__
-inline float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+float cospif(float __x) { return __ocml_cospi_f32(__x); }
+
__DEVICE__
-inline float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+
__DEVICE__
-inline float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+
__DEVICE__
-inline float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+
__DEVICE__
-inline float erff(float __x) { return __ocml_erf_f32(__x); }
+float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+
__DEVICE__
-inline float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+
__DEVICE__
-inline float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+float erff(float __x) { return __ocml_erf_f32(__x); }
+
__DEVICE__
-inline float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+
__DEVICE__
-inline float expf(float __x) { return __ocml_exp_f32(__x); }
+float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+
__DEVICE__
-inline float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+
__DEVICE__
-inline float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+float expf(float __x) { return __ocml_exp_f32(__x); }
+
__DEVICE__
-inline float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+
__DEVICE__
-inline float fdividef(float __x, float __y) { return __x / __y; }
+float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+
__DEVICE__
-inline float floorf(float __x) { return __ocml_floor_f32(__x); }
+float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+
__DEVICE__
-inline float fmaf(float __x, float __y, float __z) {
+float fdividef(float __x, float __y) { return __x / __y; }
+
+__DEVICE__
+float floorf(float __x) { return __ocml_floor_f32(__x); }
+
+__DEVICE__
+float fmaf(float __x, float __y, float __z) {
return __ocml_fma_f32(__x, __y, __z);
}
+
__DEVICE__
-inline float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+
__DEVICE__
-inline float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+
__DEVICE__
-inline float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+
__DEVICE__
-inline float frexpf(float __x, int *__nptr) {
+float frexpf(float __x, int *__nptr) {
int __tmp;
float __r =
__ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
@@ -174,24 +253,31 @@ inline float frexpf(float __x, int *__nptr) {
return __r;
}
+
__DEVICE__
-inline float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+
__DEVICE__
-inline int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+
__DEVICE__
-inline __RETURN_TYPE isfinite(float __x) { return __ocml_isfinite_f32(__x); }
+__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
+
__DEVICE__
-inline __RETURN_TYPE isinf(float __x) { return __ocml_isinf_f32(__x); }
+__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
+
__DEVICE__
-inline __RETURN_TYPE isnan(float __x) { return __ocml_isnan_f32(__x); }
+__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
+
__DEVICE__
-inline float j0f(float __x) { return __ocml_j0_f32(__x); }
+float j0f(float __x) { return __ocml_j0_f32(__x); }
+
__DEVICE__
-inline float j1f(float __x) { return __ocml_j1_f32(__x); }
+float j1f(float __x) { return __ocml_j1_f32(__x); }
+
__DEVICE__
-inline float jnf(int __n,
- float __x) { // TODO: we could use Ahmes multiplication
- // and the Miller & Brown algorithm
+float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
// for linear recurrences to get O(log n) steps, but it's unclear if
// it'd be beneficial in this case.
if (__n == 0)
@@ -209,50 +295,61 @@ inline float jnf(int __n,
return __x1;
}
+
__DEVICE__
-inline float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+
__DEVICE__
-inline float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+
__DEVICE__
-inline long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+
__DEVICE__
-inline long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+
__DEVICE__
-inline float log10f(float __x) { return __ocml_log10_f32(__x); }
+float log10f(float __x) { return __ocml_log10_f32(__x); }
+
__DEVICE__
-inline float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+
__DEVICE__
-inline float log2f(float __x) { return __ocml_log2_f32(__x); }
+float log2f(float __x) { return __ocml_log2_f32(__x); }
+
__DEVICE__
-inline float logbf(float __x) { return __ocml_logb_f32(__x); }
+float logbf(float __x) { return __ocml_logb_f32(__x); }
+
__DEVICE__
-inline float logf(float __x) { return __ocml_log_f32(__x); }
+float logf(float __x) { return __ocml_log_f32(__x); }
+
__DEVICE__
-inline long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+
__DEVICE__
-inline long int lroundf(float __x) { return __ocml_round_f32(__x); }
+long int lroundf(float __x) { return __ocml_round_f32(__x); }
+
__DEVICE__
-inline float modff(float __x, float *__iptr) {
+float modff(float __x, float *__iptr) {
float __tmp;
float __r =
__ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
*__iptr = __tmp;
-
return __r;
}
+
__DEVICE__
-inline float nanf(const char *__tagp) {
+float nanf(const char *__tagp) {
union {
float val;
struct ieee_float {
- uint32_t mantissa : 22;
- uint32_t quiet : 1;
- uint32_t exponent : 8;
- uint32_t sign : 1;
+ unsigned int mantissa : 22;
+ unsigned int quiet : 1;
+ unsigned int exponent : 8;
+ unsigned int sign : 1;
} bits;
-
- static_assert(sizeof(float) == sizeof(ieee_float), "");
} __tmp;
+ __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
__tmp.bits.sign = 0u;
__tmp.bits.exponent = ~0u;
@@ -261,28 +358,34 @@ inline float nanf(const char *__tagp) {
return __tmp.val;
}
+
__DEVICE__
-inline float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+
__DEVICE__
-inline float nextafterf(float __x, float __y) {
+float nextafterf(float __x, float __y) {
return __ocml_nextafter_f32(__x, __y);
}
+
__DEVICE__
-inline float norm3df(float __x, float __y, float __z) {
+float norm3df(float __x, float __y, float __z) {
return __ocml_len3_f32(__x, __y, __z);
}
+
__DEVICE__
-inline float norm4df(float __x, float __y, float __z, float __w) {
+float norm4df(float __x, float __y, float __z, float __w) {
return __ocml_len4_f32(__x, __y, __z, __w);
}
+
__DEVICE__
-inline float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+
__DEVICE__
-inline float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+
__DEVICE__
-inline float
-normf(int __dim,
- const float *__a) { // TODO: placeholder until OCML adds support.
+float normf(int __dim,
+ const float *__a) { // TODO: placeholder until OCML adds support.
float __r = 0;
while (__dim--) {
__r += __a[0] * __a[0];
@@ -291,16 +394,23 @@ normf(int __dim,
return __ocml_sqrt_f32(__r);
}
+
__DEVICE__
-inline float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+
+__DEVICE__
+float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); }
+
__DEVICE__
-inline float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+
__DEVICE__
-inline float remainderf(float __x, float __y) {
+float remainderf(float __x, float __y) {
return __ocml_remainder_f32(__x, __y);
}
+
__DEVICE__
-inline float remquof(float __x, float __y, int *__quo) {
+float remquof(float __x, float __y, int *__quo) {
int __tmp;
float __r = __ocml_remquo_f32(
__x, __y, (__attribute__((address_space(5))) int *)&__tmp);
@@ -308,25 +418,26 @@ inline float remquof(float __x, float __y, int *__quo) {
return __r;
}
+
__DEVICE__
-inline float rhypotf(float __x, float __y) {
- return __ocml_rhypot_f32(__x, __y);
-}
+float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
+
__DEVICE__
-inline float rintf(float __x) { return __ocml_rint_f32(__x); }
+float rintf(float __x) { return __ocml_rint_f32(__x); }
+
__DEVICE__
-inline float rnorm3df(float __x, float __y, float __z) {
+float rnorm3df(float __x, float __y, float __z) {
return __ocml_rlen3_f32(__x, __y, __z);
}
__DEVICE__
-inline float rnorm4df(float __x, float __y, float __z, float __w) {
+float rnorm4df(float __x, float __y, float __z, float __w) {
return __ocml_rlen4_f32(__x, __y, __z, __w);
}
+
__DEVICE__
-inline float
-rnormf(int __dim,
- const float *__a) { // TODO: placeholder until OCML adds support.
+float rnormf(int __dim,
+ const float *__a) { // TODO: placeholder until OCML adds support.
float __r = 0;
while (__dim--) {
__r += __a[0] * __a[0];
@@ -335,59 +446,74 @@ rnormf(int __dim,
return __ocml_rsqrt_f32(__r);
}
+
__DEVICE__
-inline float roundf(float __x) { return __ocml_round_f32(__x); }
+float roundf(float __x) { return __ocml_round_f32(__x); }
+
__DEVICE__
-inline float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+
__DEVICE__
-inline float scalblnf(float __x, long int __n) {
+float scalblnf(float __x, long int __n) {
return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
: __ocml_scalb_f32(__x, __n);
}
+
__DEVICE__
-inline float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+
__DEVICE__
-inline __RETURN_TYPE signbit(float __x) { return __ocml_signbit_f32(__x); }
+__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
+
__DEVICE__
-inline void sincosf(float __x, float *__sinptr, float *__cosptr) {
+void sincosf(float __x, float *__sinptr, float *__cosptr) {
float __tmp;
-
*__sinptr =
__ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
*__cosptr = __tmp;
}
+
__DEVICE__
-inline void sincospif(float __x, float *__sinptr, float *__cosptr) {
+void sincospif(float __x, float *__sinptr, float *__cosptr) {
float __tmp;
-
*__sinptr = __ocml_sincospi_f32(
__x, (__attribute__((address_space(5))) float *)&__tmp);
*__cosptr = __tmp;
}
+
__DEVICE__
-inline float sinf(float __x) { return __ocml_sin_f32(__x); }
+float sinf(float __x) { return __ocml_sin_f32(__x); }
+
__DEVICE__
-inline float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+
__DEVICE__
-inline float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+
__DEVICE__
-inline float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+
__DEVICE__
-inline float tanf(float __x) { return __ocml_tan_f32(__x); }
+float tanf(float __x) { return __ocml_tan_f32(__x); }
+
__DEVICE__
-inline float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+
__DEVICE__
-inline float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+
__DEVICE__
-inline float truncf(float __x) { return __ocml_trunc_f32(__x); }
+float truncf(float __x) { return __ocml_trunc_f32(__x); }
+
__DEVICE__
-inline float y0f(float __x) { return __ocml_y0_f32(__x); }
+float y0f(float __x) { return __ocml_y0_f32(__x); }
+
__DEVICE__
-inline float y1f(float __x) { return __ocml_y1_f32(__x); }
+float y1f(float __x) { return __ocml_y1_f32(__x); }
+
__DEVICE__
-inline float ynf(int __n,
- float __x) { // TODO: we could use Ahmes multiplication
- // and the Miller & Brown algorithm
+float ynf(int __n, float __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
// for linear recurrences to get O(log n) steps, but it's unclear if
// it'd be beneficial in this case. Placeholder until OCML adds
// support.
@@ -408,290 +534,343 @@ inline float ynf(int __n,
}
// BEGIN INTRINSICS
+
__DEVICE__
-inline float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+
__DEVICE__
-inline float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+
__DEVICE__
-inline float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __fadd_rd(float __x, float __y) {
- return __ocml_add_rtn_f32(__x, __y);
-}
-#endif
+float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
__DEVICE__
-inline float __fadd_rn(float __x, float __y) { return __x + __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
+float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
__DEVICE__
-inline float __fadd_ru(float __x, float __y) {
- return __ocml_add_rtp_f32(__x, __y);
-}
+float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
__DEVICE__
-inline float __fadd_rz(float __x, float __y) {
- return __ocml_add_rtz_f32(__x, __y);
-}
+float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
+#else
__DEVICE__
-inline float __fdiv_rd(float __x, float __y) {
- return __ocml_div_rtn_f32(__x, __y);
-}
+float __fadd_rn(float __x, float __y) { return __x + __y; }
#endif
-__DEVICE__
-inline float __fdiv_rn(float __x, float __y) { return __x / __y; }
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __fdiv_ru(float __x, float __y) {
- return __ocml_div_rtp_f32(__x, __y);
-}
+float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
__DEVICE__
-inline float __fdiv_rz(float __x, float __y) {
- return __ocml_div_rtz_f32(__x, __y);
-}
+float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
+__DEVICE__
+float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
+__DEVICE__
+float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
+#else
+__DEVICE__
+float __fdiv_rn(float __x, float __y) { return __x / __y; }
#endif
+
__DEVICE__
-inline float __fdividef(float __x, float __y) { return __x / __y; }
+float __fdividef(float __x, float __y) { return __x / __y; }
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __fmaf_rd(float __x, float __y, float __z) {
+float __fmaf_rd(float __x, float __y, float __z) {
return __ocml_fma_rtn_f32(__x, __y, __z);
}
-#endif
__DEVICE__
-inline float __fmaf_rn(float __x, float __y, float __z) {
- return __ocml_fma_f32(__x, __y, __z);
+float __fmaf_rn(float __x, float __y, float __z) {
+ return __ocml_fma_rte_f32(__x, __y, __z);
}
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __fmaf_ru(float __x, float __y, float __z) {
+float __fmaf_ru(float __x, float __y, float __z) {
return __ocml_fma_rtp_f32(__x, __y, __z);
}
__DEVICE__
-inline float __fmaf_rz(float __x, float __y, float __z) {
+float __fmaf_rz(float __x, float __y, float __z) {
return __ocml_fma_rtz_f32(__x, __y, __z);
}
+#else
__DEVICE__
-inline float __fmul_rd(float __x, float __y) {
- return __ocml_mul_rtn_f32(__x, __y);
+float __fmaf_rn(float __x, float __y, float __z) {
+ return __ocml_fma_f32(__x, __y, __z);
}
#endif
-__DEVICE__
-inline float __fmul_rn(float __x, float __y) { return __x * __y; }
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __fmul_ru(float __x, float __y) {
- return __ocml_mul_rtp_f32(__x, __y);
-}
+float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
__DEVICE__
-inline float __fmul_rz(float __x, float __y) {
- return __ocml_mul_rtz_f32(__x, __y);
-}
+float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
__DEVICE__
-inline float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#endif
+float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
+__DEVICE__
+float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
+#else
__DEVICE__
-inline float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+float __fmul_rn(float __x, float __y) { return __x * __y; }
+#endif
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); }
__DEVICE__
-inline float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#endif
+float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); }
__DEVICE__
-inline float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
+float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); }
+__DEVICE__
+float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); }
+#else
__DEVICE__
-inline float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+float __frcp_rn(float __x) { return 1.0f / __x; }
#endif
+
__DEVICE__
-inline float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
+float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
__DEVICE__
-inline float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
__DEVICE__
-inline float __fsub_rd(float __x, float __y) {
- return __ocml_sub_rtn_f32(__x, __y);
-}
-#endif
+float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+__DEVICE__
+float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+#else
__DEVICE__
-inline float __fsub_rn(float __x, float __y) { return __x - __y; }
+float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
+#endif
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline float __fsub_ru(float __x, float __y) {
- return __ocml_sub_rtp_f32(__x, __y);
-}
+float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
__DEVICE__
-inline float __fsub_rz(float __x, float __y) {
- return __ocml_sub_rtz_f32(__x, __y);
-}
+float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
+__DEVICE__
+float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
+__DEVICE__
+float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
+#else
+__DEVICE__
+float __fsub_rn(float __x, float __y) { return __x - __y; }
#endif
+
__DEVICE__
-inline float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+
__DEVICE__
-inline float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+
__DEVICE__
-inline float __logf(float __x) { return __ocml_native_log_f32(__x); }
+float __logf(float __x) { return __ocml_native_log_f32(__x); }
+
__DEVICE__
-inline float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+
__DEVICE__
-inline float __saturatef(float __x) {
- return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x);
-}
+float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }
+
__DEVICE__
-inline void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+void __sincosf(float __x, float *__sinptr, float *__cosptr) {
*__sinptr = __ocml_native_sin_f32(__x);
*__cosptr = __ocml_native_cos_f32(__x);
}
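A hypothetical usage sketch (device qualifiers elided for brevity): one call yields both values, which lets an implementation share the argument reduction between the native sin and cos evaluations.

void polar_to_cart(float r, float theta, float *x, float *y) {
  float s, c;
  __sincosf(theta, &s, &c);
  *x = r * c;
  *y = r * s;
}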
+
__DEVICE__
-inline float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+
__DEVICE__
-inline float __tanf(float __x) { return __ocml_tan_f32(__x); }
+float __tanf(float __x) { return __ocml_tan_f32(__x); }
// END INTRINSICS
// END FLOAT
// BEGIN DOUBLE
__DEVICE__
-inline double abs(double __x) { return __ocml_fabs_f64(__x); }
-__DEVICE__
-inline double acos(double __x) { return __ocml_acos_f64(__x); }
+double acos(double __x) { return __ocml_acos_f64(__x); }
+
__DEVICE__
-inline double acosh(double __x) { return __ocml_acosh_f64(__x); }
+double acosh(double __x) { return __ocml_acosh_f64(__x); }
+
__DEVICE__
-inline double asin(double __x) { return __ocml_asin_f64(__x); }
+double asin(double __x) { return __ocml_asin_f64(__x); }
+
__DEVICE__
-inline double asinh(double __x) { return __ocml_asinh_f64(__x); }
+double asinh(double __x) { return __ocml_asinh_f64(__x); }
+
__DEVICE__
-inline double atan(double __x) { return __ocml_atan_f64(__x); }
+double atan(double __x) { return __ocml_atan_f64(__x); }
+
__DEVICE__
-inline double atan2(double __x, double __y) {
- return __ocml_atan2_f64(__x, __y);
-}
+double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); }
+
__DEVICE__
-inline double atanh(double __x) { return __ocml_atanh_f64(__x); }
+double atanh(double __x) { return __ocml_atanh_f64(__x); }
+
__DEVICE__
-inline double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+
__DEVICE__
-inline double ceil(double __x) { return __ocml_ceil_f64(__x); }
+double ceil(double __x) { return __ocml_ceil_f64(__x); }
+
__DEVICE__
-inline double copysign(double __x, double __y) {
+double copysign(double __x, double __y) {
return __ocml_copysign_f64(__x, __y);
}
+
__DEVICE__
-inline double cos(double __x) { return __ocml_cos_f64(__x); }
+double cos(double __x) { return __ocml_cos_f64(__x); }
+
__DEVICE__
-inline double cosh(double __x) { return __ocml_cosh_f64(__x); }
+double cosh(double __x) { return __ocml_cosh_f64(__x); }
+
__DEVICE__
-inline double cospi(double __x) { return __ocml_cospi_f64(__x); }
+double cospi(double __x) { return __ocml_cospi_f64(__x); }
+
__DEVICE__
-inline double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+
__DEVICE__
-inline double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+
__DEVICE__
-inline double erf(double __x) { return __ocml_erf_f64(__x); }
+double erf(double __x) { return __ocml_erf_f64(__x); }
+
__DEVICE__
-inline double erfc(double __x) { return __ocml_erfc_f64(__x); }
+double erfc(double __x) { return __ocml_erfc_f64(__x); }
+
__DEVICE__
-inline double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+
__DEVICE__
-inline double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+
__DEVICE__
-inline double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+
__DEVICE__
-inline double exp(double __x) { return __ocml_exp_f64(__x); }
+double exp(double __x) { return __ocml_exp_f64(__x); }
+
__DEVICE__
-inline double exp10(double __x) { return __ocml_exp10_f64(__x); }
+double exp10(double __x) { return __ocml_exp10_f64(__x); }
+
__DEVICE__
-inline double exp2(double __x) { return __ocml_exp2_f64(__x); }
+double exp2(double __x) { return __ocml_exp2_f64(__x); }
+
__DEVICE__
-inline double expm1(double __x) { return __ocml_expm1_f64(__x); }
+double expm1(double __x) { return __ocml_expm1_f64(__x); }
+
__DEVICE__
-inline double fabs(double __x) { return __ocml_fabs_f64(__x); }
+double fabs(double __x) { return __ocml_fabs_f64(__x); }
+
__DEVICE__
-inline double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+
__DEVICE__
-inline double floor(double __x) { return __ocml_floor_f64(__x); }
+double floor(double __x) { return __ocml_floor_f64(__x); }
+
__DEVICE__
-inline double fma(double __x, double __y, double __z) {
+double fma(double __x, double __y, double __z) {
return __ocml_fma_f64(__x, __y, __z);
}
+
__DEVICE__
-inline double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+
__DEVICE__
-inline double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+
__DEVICE__
-inline double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
+double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
+
__DEVICE__
-inline double frexp(double __x, int *__nptr) {
+double frexp(double __x, int *__nptr) {
int __tmp;
double __r =
__ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
*__nptr = __tmp;
-
return __r;
}
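A note on the pattern above, which recurs in modf, remquo, sincos, and sincospi: the OCML entry point takes its out-parameter in AMDGPU address space 5, presumably the private (stack) address space, while the wrapper receives a generic pointer, so the result is round-tripped through a local temporary and then copied out to the caller's pointer.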
+
__DEVICE__
-inline double hypot(double __x, double __y) {
- return __ocml_hypot_f64(__x, __y);
-}
+double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); }
+
__DEVICE__
-inline int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+
__DEVICE__
-inline __RETURN_TYPE isfinite(double __x) { return __ocml_isfinite_f64(__x); }
+__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
+
__DEVICE__
-inline __RETURN_TYPE isinf(double __x) { return __ocml_isinf_f64(__x); }
+__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
+
__DEVICE__
-inline __RETURN_TYPE isnan(double __x) { return __ocml_isnan_f64(__x); }
+__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
+
__DEVICE__
-inline double j0(double __x) { return __ocml_j0_f64(__x); }
+double j0(double __x) { return __ocml_j0_f64(__x); }
+
__DEVICE__
-inline double j1(double __x) { return __ocml_j1_f64(__x); }
+double j1(double __x) { return __ocml_j1_f64(__x); }
+
__DEVICE__
-inline double jn(int __n,
- double __x) { // TODO: we could use Ahmes multiplication
- // and the Miller & Brown algorithm
+double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
// for linear recurrences to get O(log n) steps, but it's unclear if
// it'd be beneficial in this case. Placeholder until OCML adds
// support.
if (__n == 0)
- return j0f(__x);
+ return j0(__x);
if (__n == 1)
- return j1f(__x);
+ return j1(__x);
- double __x0 = j0f(__x);
- double __x1 = j1f(__x);
+ double __x0 = j0(__x);
+ double __x1 = j1(__x);
for (int __i = 1; __i < __n; ++__i) {
double __x2 = (2 * __i) / __x * __x1 - __x0;
__x0 = __x1;
__x1 = __x2;
}
-
return __x1;
}
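The loop above implements the forward three-term recurrence

  J_{n+1}(x) = (2n/x) J_n(x) - J_{n-1}(x),

which is numerically unstable in the forward direction once n exceeds roughly |x|: rounding errors excite the dominant companion solution Y_n and grow without bound. Miller-style algorithms run the recurrence backward from a large starting index and renormalize against a known value, which is presumably the improvement the TODO's reference to Miller & Brown has in mind.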
+
__DEVICE__
-inline double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+
__DEVICE__
-inline double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+
__DEVICE__
-inline long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+
__DEVICE__
-inline long long int llround(double __x) { return __ocml_round_f64(__x); }
+long long int llround(double __x) { return __ocml_round_f64(__x); }
+
__DEVICE__
-inline double log(double __x) { return __ocml_log_f64(__x); }
+double log(double __x) { return __ocml_log_f64(__x); }
+
__DEVICE__
-inline double log10(double __x) { return __ocml_log10_f64(__x); }
+double log10(double __x) { return __ocml_log10_f64(__x); }
+
__DEVICE__
-inline double log1p(double __x) { return __ocml_log1p_f64(__x); }
+double log1p(double __x) { return __ocml_log1p_f64(__x); }
+
__DEVICE__
-inline double log2(double __x) { return __ocml_log2_f64(__x); }
+double log2(double __x) { return __ocml_log2_f64(__x); }
+
__DEVICE__
-inline double logb(double __x) { return __ocml_logb_f64(__x); }
+double logb(double __x) { return __ocml_logb_f64(__x); }
+
__DEVICE__
-inline long int lrint(double __x) { return __ocml_rint_f64(__x); }
+long int lrint(double __x) { return __ocml_rint_f64(__x); }
+
__DEVICE__
-inline long int lround(double __x) { return __ocml_round_f64(__x); }
+long int lround(double __x) { return __ocml_round_f64(__x); }
+
__DEVICE__
-inline double modf(double __x, double *__iptr) {
+double modf(double __x, double *__iptr) {
double __tmp;
double __r =
__ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
@@ -699,8 +878,9 @@ inline double modf(double __x, double *__iptr) {
return __r;
}
+
__DEVICE__
-inline double nan(const char *__tagp) {
+double nan(const char *__tagp) {
#if !_WIN32
union {
double val;
@@ -710,8 +890,8 @@ inline double nan(const char *__tagp) {
uint32_t exponent : 11;
uint32_t sign : 1;
} bits;
- static_assert(sizeof(double) == sizeof(ieee_double), "");
} __tmp;
+ __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
__tmp.bits.sign = 0u;
__tmp.bits.exponent = ~0u;
@@ -720,22 +900,24 @@ inline double nan(const char *__tagp) {
return __tmp.val;
#else
- static_assert(sizeof(uint64_t) == sizeof(double));
- uint64_t val = __make_mantissa(__tagp);
- val |= 0xFFF << 51;
- return *reinterpret_cast<double *>(&val);
+ __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double));
+ uint64_t __val = __make_mantissa(__tagp);
+  __val |= 0xFFFull << 51;
+ return *reinterpret_cast<double *>(&__val);
#endif
}
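The #else branch type-puns through reinterpret_cast, which formally breaks strict aliasing; a memcpy keeps the same bit manipulation well defined. A host-side C sketch of the payload construction, assuming IEEE-754 binary64 (the header's single 0xFFFull << 51 constant covers the same bits 51-62 that are set in two steps below):

#include <stdint.h>
#include <string.h>

/* Build a quiet NaN carrying a payload in the low mantissa bits; a
   stand-in for the header's __make_mantissa() would feed the payload. */
static double nan_with_payload(uint64_t payload) {
  uint64_t bits = payload & ((1ull << 51) - 1); /* low 51 payload bits */
  bits |= 0x7FFull << 52; /* exponent field all-ones (bits 52-62) */
  bits |= 1ull << 51;     /* quiet bit */
  double d;
  memcpy(&d, &bits, sizeof d); /* aliasing-safe bit copy */
  return d;
}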
+
__DEVICE__
-inline double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+
__DEVICE__
-inline double nextafter(double __x, double __y) {
+double nextafter(double __x, double __y) {
return __ocml_nextafter_f64(__x, __y);
}
+
__DEVICE__
-inline double
-norm(int __dim,
- const double *__a) { // TODO: placeholder until OCML adds support.
+double norm(int __dim,
+ const double *__a) { // TODO: placeholder until OCML adds support.
double __r = 0;
while (__dim--) {
__r += __a[0] * __a[0];
@@ -744,28 +926,39 @@ norm(int __dim,
return __ocml_sqrt_f64(__r);
}
+
__DEVICE__
-inline double norm3d(double __x, double __y, double __z) {
+double norm3d(double __x, double __y, double __z) {
return __ocml_len3_f64(__x, __y, __z);
}
+
__DEVICE__
-inline double norm4d(double __x, double __y, double __z, double __w) {
+double norm4d(double __x, double __y, double __z, double __w) {
return __ocml_len4_f64(__x, __y, __z, __w);
}
+
+__DEVICE__
+double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+
__DEVICE__
-inline double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+
__DEVICE__
-inline double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+
__DEVICE__
-inline double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); }
+
__DEVICE__
-inline double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+
__DEVICE__
-inline double remainder(double __x, double __y) {
+double remainder(double __x, double __y) {
return __ocml_remainder_f64(__x, __y);
}
+
__DEVICE__
-inline double remquo(double __x, double __y, int *__quo) {
+double remquo(double __x, double __y, int *__quo) {
int __tmp;
double __r = __ocml_remquo_f64(
__x, __y, (__attribute__((address_space(5))) int *)&__tmp);
@@ -773,16 +966,16 @@ inline double remquo(double __x, double __y, int *__quo) {
return __r;
}
+
__DEVICE__
-inline double rhypot(double __x, double __y) {
- return __ocml_rhypot_f64(__x, __y);
-}
+double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
+
__DEVICE__
-inline double rint(double __x) { return __ocml_rint_f64(__x); }
+double rint(double __x) { return __ocml_rint_f64(__x); }
+
__DEVICE__
-inline double
-rnorm(int __dim,
- const double *__a) { // TODO: placeholder until OCML adds support.
+double rnorm(int __dim,
+ const double *__a) { // TODO: placeholder until OCML adds support.
double __r = 0;
while (__dim--) {
__r += __a[0] * __a[0];
@@ -791,77 +984,93 @@ rnorm(int __dim,
return __ocml_rsqrt_f64(__r);
}
+
__DEVICE__
-inline double rnorm3d(double __x, double __y, double __z) {
+double rnorm3d(double __x, double __y, double __z) {
return __ocml_rlen3_f64(__x, __y, __z);
}
+
__DEVICE__
-inline double rnorm4d(double __x, double __y, double __z, double __w) {
+double rnorm4d(double __x, double __y, double __z, double __w) {
return __ocml_rlen4_f64(__x, __y, __z, __w);
}
+
__DEVICE__
-inline double round(double __x) { return __ocml_round_f64(__x); }
+double round(double __x) { return __ocml_round_f64(__x); }
+
__DEVICE__
-inline double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+
__DEVICE__
-inline double scalbln(double __x, long int __n) {
+double scalbln(double __x, long int __n) {
return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
: __ocml_scalb_f64(__x, __n);
}
__DEVICE__
-inline double scalbn(double __x, int __n) {
- return __ocml_scalbn_f64(__x, __n);
-}
+double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
+
__DEVICE__
-inline __RETURN_TYPE signbit(double __x) { return __ocml_signbit_f64(__x); }
+__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
+
__DEVICE__
-inline double sin(double __x) { return __ocml_sin_f64(__x); }
+double sin(double __x) { return __ocml_sin_f64(__x); }
+
__DEVICE__
-inline void sincos(double __x, double *__sinptr, double *__cosptr) {
+void sincos(double __x, double *__sinptr, double *__cosptr) {
double __tmp;
*__sinptr = __ocml_sincos_f64(
__x, (__attribute__((address_space(5))) double *)&__tmp);
*__cosptr = __tmp;
}
+
__DEVICE__
-inline void sincospi(double __x, double *__sinptr, double *__cosptr) {
+void sincospi(double __x, double *__sinptr, double *__cosptr) {
double __tmp;
*__sinptr = __ocml_sincospi_f64(
__x, (__attribute__((address_space(5))) double *)&__tmp);
*__cosptr = __tmp;
}
+
__DEVICE__
-inline double sinh(double __x) { return __ocml_sinh_f64(__x); }
+double sinh(double __x) { return __ocml_sinh_f64(__x); }
+
__DEVICE__
-inline double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+
__DEVICE__
-inline double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+
__DEVICE__
-inline double tan(double __x) { return __ocml_tan_f64(__x); }
+double tan(double __x) { return __ocml_tan_f64(__x); }
+
__DEVICE__
-inline double tanh(double __x) { return __ocml_tanh_f64(__x); }
+double tanh(double __x) { return __ocml_tanh_f64(__x); }
+
__DEVICE__
-inline double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+
__DEVICE__
-inline double trunc(double __x) { return __ocml_trunc_f64(__x); }
+double trunc(double __x) { return __ocml_trunc_f64(__x); }
+
__DEVICE__
-inline double y0(double __x) { return __ocml_y0_f64(__x); }
+double y0(double __x) { return __ocml_y0_f64(__x); }
+
__DEVICE__
-inline double y1(double __x) { return __ocml_y1_f64(__x); }
+double y1(double __x) { return __ocml_y1_f64(__x); }
+
__DEVICE__
-inline double yn(int __n,
- double __x) { // TODO: we could use Ahmes multiplication
- // and the Miller & Brown algorithm
+double yn(int __n, double __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
// for linear recurrences to get O(log n) steps, but it's unclear if
// it'd be beneficial in this case. Placeholder until OCML adds
// support.
if (__n == 0)
- return j0f(__x);
+ return y0(__x);
if (__n == 1)
- return j1f(__x);
+ return y1(__x);
- double __x0 = j0f(__x);
- double __x1 = j1f(__x);
+ double __x0 = y0(__x);
+ double __x1 = y1(__x);
for (int __i = 1; __i < __n; ++__i) {
double __x2 = (2 * __i) / __x * __x1 - __x0;
__x0 = __x1;
@@ -874,296 +1083,182 @@ inline double yn(int __n,
// BEGIN INTRINSICS
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline double __dadd_rd(double __x, double __y) {
+double __dadd_rd(double __x, double __y) {
return __ocml_add_rtn_f64(__x, __y);
}
-#endif
__DEVICE__
-inline double __dadd_rn(double __x, double __y) { return __x + __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
+double __dadd_rn(double __x, double __y) {
+ return __ocml_add_rte_f64(__x, __y);
+}
__DEVICE__
-inline double __dadd_ru(double __x, double __y) {
+double __dadd_ru(double __x, double __y) {
return __ocml_add_rtp_f64(__x, __y);
}
__DEVICE__
-inline double __dadd_rz(double __x, double __y) {
+double __dadd_rz(double __x, double __y) {
return __ocml_add_rtz_f64(__x, __y);
}
+#else
__DEVICE__
-inline double __ddiv_rd(double __x, double __y) {
+double __dadd_rn(double __x, double __y) { return __x + __y; }
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __ddiv_rd(double __x, double __y) {
return __ocml_div_rtn_f64(__x, __y);
}
-#endif
__DEVICE__
-inline double __ddiv_rn(double __x, double __y) { return __x / __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
+double __ddiv_rn(double __x, double __y) {
+ return __ocml_div_rte_f64(__x, __y);
+}
__DEVICE__
-inline double __ddiv_ru(double __x, double __y) {
+double __ddiv_ru(double __x, double __y) {
return __ocml_div_rtp_f64(__x, __y);
}
__DEVICE__
-inline double __ddiv_rz(double __x, double __y) {
+double __ddiv_rz(double __x, double __y) {
return __ocml_div_rtz_f64(__x, __y);
}
+#else
+__DEVICE__
+double __ddiv_rn(double __x, double __y) { return __x / __y; }
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline double __dmul_rd(double __x, double __y) {
+double __dmul_rd(double __x, double __y) {
return __ocml_mul_rtn_f64(__x, __y);
}
-#endif
__DEVICE__
-inline double __dmul_rn(double __x, double __y) { return __x * __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
+double __dmul_rn(double __x, double __y) {
+ return __ocml_mul_rte_f64(__x, __y);
+}
__DEVICE__
-inline double __dmul_ru(double __x, double __y) {
+double __dmul_ru(double __x, double __y) {
return __ocml_mul_rtp_f64(__x, __y);
}
__DEVICE__
-inline double __dmul_rz(double __x, double __y) {
+double __dmul_rz(double __x, double __y) {
return __ocml_mul_rtz_f64(__x, __y);
}
+#else
__DEVICE__
-inline double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+double __dmul_rn(double __x, double __y) { return __x * __y; }
#endif
-__DEVICE__
-inline double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+double __drcp_rd(double __x) { return __ocml_div_rtn_f64(1.0, __x); }
__DEVICE__
-inline double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+double __drcp_rn(double __x) { return __ocml_div_rte_f64(1.0, __x); }
__DEVICE__
-inline double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
-#endif
+double __drcp_ru(double __x) { return __ocml_div_rtp_f64(1.0, __x); }
__DEVICE__
-inline double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
+double __drcp_rz(double __x) { return __ocml_div_rtz_f64(1.0, __x); }
+#else
+__DEVICE__
+double __drcp_rn(double __x) { return 1.0 / __x; }
+#endif
+
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
+double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
+__DEVICE__
+double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); }
+__DEVICE__
+double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
__DEVICE__
-inline double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+#else
+__DEVICE__
+double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline double __dsub_rd(double __x, double __y) {
+double __dsub_rd(double __x, double __y) {
return __ocml_sub_rtn_f64(__x, __y);
}
-#endif
__DEVICE__
-inline double __dsub_rn(double __x, double __y) { return __x - __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
+double __dsub_rn(double __x, double __y) {
+ return __ocml_sub_rte_f64(__x, __y);
+}
__DEVICE__
-inline double __dsub_ru(double __x, double __y) {
+double __dsub_ru(double __x, double __y) {
return __ocml_sub_rtp_f64(__x, __y);
}
__DEVICE__
-inline double __dsub_rz(double __x, double __y) {
+double __dsub_rz(double __x, double __y) {
return __ocml_sub_rtz_f64(__x, __y);
}
+#else
+__DEVICE__
+double __dsub_rn(double __x, double __y) { return __x - __y; }
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline double __fma_rd(double __x, double __y, double __z) {
+double __fma_rd(double __x, double __y, double __z) {
return __ocml_fma_rtn_f64(__x, __y, __z);
}
-#endif
__DEVICE__
-inline double __fma_rn(double __x, double __y, double __z) {
- return __ocml_fma_f64(__x, __y, __z);
+double __fma_rn(double __x, double __y, double __z) {
+ return __ocml_fma_rte_f64(__x, __y, __z);
}
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
-inline double __fma_ru(double __x, double __y, double __z) {
+double __fma_ru(double __x, double __y, double __z) {
return __ocml_fma_rtp_f64(__x, __y, __z);
}
__DEVICE__
-inline double __fma_rz(double __x, double __y, double __z) {
+double __fma_rz(double __x, double __y, double __z) {
return __ocml_fma_rtz_f64(__x, __y, __z);
}
+#else
+__DEVICE__
+double __fma_rn(double __x, double __y, double __z) {
+ return __ocml_fma_f64(__x, __y, __z);
+}
#endif
// END INTRINSICS
// END DOUBLE
-// BEGIN INTEGER
-__DEVICE__
-inline int abs(int __x) {
- int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
- return (__x ^ __sgn) - __sgn;
-}
-__DEVICE__
-inline long labs(long __x) {
- long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
- return (__x ^ __sgn) - __sgn;
-}
-__DEVICE__
-inline long long llabs(long long __x) {
- long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
- return (__x ^ __sgn) - __sgn;
-}
+// C-only macros
+#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
+#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x)
+#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x)
+#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x)
+#define signbit(__x) \
+ _Generic((__x), float : __signbitf, double : __signbit)(__x)
+#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
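In C11 translation units these macros dispatch on the operand's static type via _Generic, selecting the float or double helper at compile time. A minimal standalone sketch of the same pattern (names here are illustrative, not part of the header):

#include <stdio.h>

static int classify_f(float x)  { (void)x; return 1; }
static int classify_d(double x) { (void)x; return 2; }

/* Picks the helper from the operand's type at compile time. */
#define classify(x) _Generic((x), float: classify_f, double: classify_d)(x)

int main(void) {
  printf("%d %d\n", classify(1.0f), classify(1.0)); /* prints "1 2" */
  return 0;
}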
#if defined(__cplusplus)
-__DEVICE__
-inline long abs(long __x) { return labs(__x); }
-__DEVICE__
-inline long long abs(long long __x) { return llabs(__x); }
-#endif
-// END INTEGER
-
-__DEVICE__
-inline _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
- return __ocml_fma_f16(__x, __y, __z);
-}
-
-__DEVICE__
-inline float fma(float __x, float __y, float __z) {
- return fmaf(__x, __y, __z);
-}
-
-#pragma push_macro("__DEF_FUN1")
-#pragma push_macro("__DEF_FUN2")
-#pragma push_macro("__DEF_FUNI")
-#pragma push_macro("__DEF_FLOAT_FUN2I")
-#pragma push_macro("__HIP_OVERLOAD1")
-#pragma push_macro("__HIP_OVERLOAD2")
-
-// __hip_enable_if::type is a type function which returns __T if __B is true.
-template <bool __B, class __T = void> struct __hip_enable_if {};
-
-template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
-
-// __HIP_OVERLOAD1 is used to resolve function calls with integer argument to
-// avoid compilation error due to ambibuity. e.g. floor(5) is resolved with
-// floor(double).
-#define __HIP_OVERLOAD1(__retty, __fn) \
- template <typename __T> \
- __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, \
- __retty>::type \
- __fn(__T __x) { \
- return ::__fn((double)__x); \
- }
-
-// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
-// or integer argument to avoid compilation error due to ambibuity. e.g.
-// max(5.0f, 6.0) is resolved with max(double, double).
-#define __HIP_OVERLOAD2(__retty, __fn) \
- template <typename __T1, typename __T2> \
- __DEVICE__ \
- typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized && \
- std::numeric_limits<__T2>::is_specialized, \
- __retty>::type \
- __fn(__T1 __x, __T2 __y) { \
- return __fn((double)__x, (double)__y); \
- }
-
-// Define cmath functions with float argument and returns float.
-#define __DEF_FUN1(__retty, __func) \
- __DEVICE__ \
- inline float __func(float __x) { return __func##f(__x); } \
- __HIP_OVERLOAD1(__retty, __func)
-
-// Define cmath functions with float argument and returns __retty.
-#define __DEF_FUNI(__retty, __func) \
- __DEVICE__ \
- inline __retty __func(float __x) { return __func##f(__x); } \
- __HIP_OVERLOAD1(__retty, __func)
-
-// define cmath functions with two float arguments.
-#define __DEF_FUN2(__retty, __func) \
- __DEVICE__ \
- inline float __func(float __x, float __y) { return __func##f(__x, __y); } \
- __HIP_OVERLOAD2(__retty, __func)
-
-__DEF_FUN1(double, acos)
-__DEF_FUN1(double, acosh)
-__DEF_FUN1(double, asin)
-__DEF_FUN1(double, asinh)
-__DEF_FUN1(double, atan)
-__DEF_FUN2(double, atan2);
-__DEF_FUN1(double, atanh)
-__DEF_FUN1(double, cbrt)
-__DEF_FUN1(double, ceil)
-__DEF_FUN2(double, copysign);
-__DEF_FUN1(double, cos)
-__DEF_FUN1(double, cosh)
-__DEF_FUN1(double, erf)
-__DEF_FUN1(double, erfc)
-__DEF_FUN1(double, exp)
-__DEF_FUN1(double, exp2)
-__DEF_FUN1(double, expm1)
-__DEF_FUN1(double, fabs)
-__DEF_FUN2(double, fdim);
-__DEF_FUN1(double, floor)
-__DEF_FUN2(double, fmax);
-__DEF_FUN2(double, fmin);
-__DEF_FUN2(double, fmod);
-//__HIP_OVERLOAD1(int, fpclassify)
-__DEF_FUN2(double, hypot);
-__DEF_FUNI(int, ilogb)
-__HIP_OVERLOAD1(bool, isfinite)
-__HIP_OVERLOAD2(bool, isgreater);
-__HIP_OVERLOAD2(bool, isgreaterequal);
-__HIP_OVERLOAD1(bool, isinf);
-__HIP_OVERLOAD2(bool, isless);
-__HIP_OVERLOAD2(bool, islessequal);
-__HIP_OVERLOAD2(bool, islessgreater);
-__HIP_OVERLOAD1(bool, isnan);
-//__HIP_OVERLOAD1(bool, isnormal)
-__HIP_OVERLOAD2(bool, isunordered);
-__DEF_FUN1(double, lgamma)
-__DEF_FUN1(double, log)
-__DEF_FUN1(double, log10)
-__DEF_FUN1(double, log1p)
-__DEF_FUN1(double, log2)
-__DEF_FUN1(double, logb)
-__DEF_FUNI(long long, llrint)
-__DEF_FUNI(long long, llround)
-__DEF_FUNI(long, lrint)
-__DEF_FUNI(long, lround)
-__DEF_FUN1(double, nearbyint);
-__DEF_FUN2(double, nextafter);
-__DEF_FUN2(double, pow);
-__DEF_FUN2(double, remainder);
-__DEF_FUN1(double, rint);
-__DEF_FUN1(double, round);
-__HIP_OVERLOAD1(bool, signbit)
-__DEF_FUN1(double, sin)
-__DEF_FUN1(double, sinh)
-__DEF_FUN1(double, sqrt)
-__DEF_FUN1(double, tan)
-__DEF_FUN1(double, tanh)
-__DEF_FUN1(double, tgamma)
-__DEF_FUN1(double, trunc);
-
-// define cmath functions with a float and an integer argument.
-#define __DEF_FLOAT_FUN2I(__func) \
- __DEVICE__ \
- inline float __func(float __x, int __y) { return __func##f(__x, __y); }
-__DEF_FLOAT_FUN2I(scalbn)
-
-template <class T> __DEVICE__ inline T min(T __arg1, T __arg2) {
+template <class T> __DEVICE__ T min(T __arg1, T __arg2) {
return (__arg1 < __arg2) ? __arg1 : __arg2;
}
-template <class T> __DEVICE__ inline T max(T __arg1, T __arg2) {
+template <class T> __DEVICE__ T max(T __arg1, T __arg2) {
return (__arg1 > __arg2) ? __arg1 : __arg2;
}
-__DEVICE__ inline int min(int __arg1, int __arg2) {
+__DEVICE__ int min(int __arg1, int __arg2) {
return (__arg1 < __arg2) ? __arg1 : __arg2;
}
-__DEVICE__ inline int max(int __arg1, int __arg2) {
+__DEVICE__ int max(int __arg1, int __arg2) {
return (__arg1 > __arg2) ? __arg1 : __arg2;
}
__DEVICE__
-inline float max(float __x, float __y) { return fmaxf(__x, __y); }
+float max(float __x, float __y) { return fmaxf(__x, __y); }
__DEVICE__
-inline double max(double __x, double __y) { return fmax(__x, __y); }
+double max(double __x, double __y) { return fmax(__x, __y); }
__DEVICE__
-inline float min(float __x, float __y) { return fminf(__x, __y); }
+float min(float __x, float __y) { return fminf(__x, __y); }
__DEVICE__
-inline double min(double __x, double __y) { return fmin(__x, __y); }
-
-__HIP_OVERLOAD2(double, max)
-__HIP_OVERLOAD2(double, min)
+double min(double __x, double __y) { return fmin(__x, __y); }
__host__ inline static int min(int __arg1, int __arg2) {
return std::min(__arg1, __arg2);
@@ -1172,13 +1267,8 @@ __host__ inline static int min(int __arg1, int __arg2) {
__host__ inline static int max(int __arg1, int __arg2) {
return std::max(__arg1, __arg2);
}
+#endif
-#pragma pop_macro("__DEF_FUN1")
-#pragma pop_macro("__DEF_FUN2")
-#pragma pop_macro("__DEF_FUNI")
-#pragma pop_macro("__DEF_FLOAT_FUN2I")
-#pragma pop_macro("__HIP_OVERLOAD1")
-#pragma pop_macro("__HIP_OVERLOAD2")
#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__RETURN_TYPE")
diff --git a/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index addae5605a5b..81a16a265ae8 100644
--- a/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -28,6 +28,10 @@
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
+#if !defined(__cplusplus) || __cplusplus < 201103L
+  #define nullptr NULL
+#endif
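With this mapping (note the macro body must not carry a trailing semicolon, or every use would expand with a stray statement), C and pre-C++11 sources can spell the null constant the same way C++11 code does:

int *p = nullptr; /* expands to: int *p = NULL; */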
+
#if __HIP_ENABLE_DEVICE_MALLOC__
extern "C" __device__ void *__hip_malloc(size_t __size);
extern "C" __device__ void *__hip_free(void *__ptr);
@@ -51,6 +55,7 @@ static inline __device__ void *free(void *__ptr) {
#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
#include <__clang_cuda_math_forward_declares.h>
+#include <__clang_hip_cmath.h>
#include <__clang_cuda_complex_builtins.h>
#include <algorithm>
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index ac5f43836316..4d50d47d51b5 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -1709,6 +1709,20 @@ vec_cmpeq(vector double __a, vector double __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
+ return (vector bool __int128)__builtin_altivec_vcmpequq(
+ (vector bool __int128)__a, (vector bool __int128)__b);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return (vector bool __int128)__builtin_altivec_vcmpequq(
+ (vector bool __int128)__a, (vector bool __int128)__b);
+}
+#endif
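A hypothetical caller, assuming a compiler targeting POWER10 (e.g. -mcpu=pwr10): the comparison yields an all-ones or all-zeros mask per 128-bit element.

vector bool __int128 eq128(vector unsigned __int128 a,
                           vector unsigned __int128 b) {
  return vec_cmpeq(a, b);
}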
+
#ifdef __POWER9_VECTOR__
/* vec_cmpne */
@@ -1766,35 +1780,25 @@ vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
(vector int)__b);
}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
- return (vector bool long long)
- ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)
- ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)
- ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
static __inline__ vector bool int __ATTRS_o_ai
vec_cmpne(vector float __a, vector float __b) {
return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
(vector int)__b);
}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector double __a, vector double __b) {
- return (vector bool long long)
- ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
+ (vector bool __int128)__a, (vector bool __int128)__b));
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
+ return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
+ (vector bool __int128)__a, (vector bool __int128)__b));
}
+#endif
/* vec_cmpnez */
@@ -1900,6 +1904,86 @@ vec_parity_lsbb(vector signed long long __a) {
return __builtin_altivec_vprtybd(__a);
}
+#else
+/* vec_cmpne */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector bool char __a, vector bool char __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector signed char __a, vector signed char __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector bool short __a, vector bool short __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector signed short __a, vector signed short __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector bool int __a, vector bool int __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector signed int __a, vector signed int __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector float __a, vector float __b) {
+ return ~(vec_cmpeq(__a, __b));
+}
+#endif
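The pre-POWER9 fallback relies on compare results being full element masks, so complementing the equality mask is an exact not-equal; for the float overload this also matches IEEE != semantics, since a NaN compares unequal to everything. An equivalent hand-written form of one overload:

vector bool int ne_words(vector signed int a, vector signed int b) {
  return ~(vec_cmpeq(a, b)); /* same as vec_cmpne(a, b) on these targets */
}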
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+#endif
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector double __a, vector double __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
#endif
/* vec_cmpgt */
@@ -1962,6 +2046,20 @@ vec_cmpgt(vector double __a, vector double __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
+ return (vector bool __int128)__builtin_altivec_vcmpgtsq(
+ (vector bool __int128)__a, (vector bool __int128)__b);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return (vector bool __int128)__builtin_altivec_vcmpgtuq(
+ (vector bool __int128)__a, (vector bool __int128)__b);
+}
+#endif
+
/* vec_cmpge */
static __inline__ vector bool char __ATTRS_o_ai
@@ -2022,6 +2120,18 @@ vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpge(vector signed __int128 __a, vector signed __int128 __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpge(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
/* vec_vcmpgefp */
static __inline__ vector bool int __attribute__((__always_inline__))
@@ -2134,6 +2244,18 @@ vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmple(vector signed __int128 __a, vector signed __int128 __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmple(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
/* vec_cmplt */
static __inline__ vector bool char __ATTRS_o_ai
@@ -2178,6 +2300,18 @@ vec_cmplt(vector double __a, vector double __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmplt(vector signed __int128 __a, vector signed __int128 __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmplt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return vec_cmpgt(__b, __a);
+}
+#endif
+
#ifdef __POWER8_VECTOR__
static __inline__ vector bool long long __ATTRS_o_ai
vec_cmplt(vector signed long long __a, vector signed long long __b) {
@@ -2702,67 +2836,67 @@ vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
}
#if defined(__powerpc64__)
-static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
+static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(const signed char *__a,
size_t __b) {
return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len(unsigned char *__a, size_t __b) {
+vec_xl_len(const unsigned char *__a, size_t __b) {
return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
}
-static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
+static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(const signed short *__a,
size_t __b) {
return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_len(unsigned short *__a, size_t __b) {
+vec_xl_len(const unsigned short *__a, size_t __b) {
return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
}
-static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
+static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(const signed int *__a,
size_t __b) {
return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
}
-static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
+static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(const unsigned int *__a,
size_t __b) {
return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
}
-static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
+static __inline__ vector float __ATTRS_o_ai vec_xl_len(const float *__a, size_t __b) {
return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_len(signed __int128 *__a, size_t __b) {
+vec_xl_len(const signed __int128 *__a, size_t __b) {
return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_len(unsigned __int128 *__a, size_t __b) {
+vec_xl_len(const unsigned __int128 *__a, size_t __b) {
return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_len(signed long long *__a, size_t __b) {
+vec_xl_len(const signed long long *__a, size_t __b) {
return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_len(unsigned long long *__a, size_t __b) {
+vec_xl_len(const unsigned long long *__a, size_t __b) {
return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
}
-static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
+static __inline__ vector double __ATTRS_o_ai vec_xl_len(const double *__a,
size_t __b) {
return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len_r(unsigned char *__a, size_t __b) {
+vec_xl_len_r(const unsigned char *__a, size_t __b) {
vector unsigned char __res =
(vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
#ifdef __LITTLE_ENDIAN__
@@ -2862,12 +2996,12 @@ static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
#ifdef __VSX__
static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a,
vector float __b) {
- return __builtin_vsx_xvcpsgnsp(__a, __b);
+ return __builtin_vsx_xvcpsgnsp(__b, __a);
}
static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
vector double __b) {
- return __builtin_vsx_xvcpsgndp(__a, __b);
+ return __builtin_vsx_xvcpsgndp(__b, __a);
}
#endif
@@ -2951,6 +3085,42 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#define vec_vctuxs __builtin_altivec_vctuxs
+/* vec_signext */
+
+#ifdef __POWER9_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signexti(vector signed char __a) {
+ return __builtin_altivec_vextsb2w(__a);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signexti(vector signed short __a) {
+ return __builtin_altivec_vextsh2w(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed char __a) {
+ return __builtin_altivec_vextsb2d(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed short __a) {
+ return __builtin_altivec_vextsh2d(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed int __a) {
+ return __builtin_altivec_vextsw2d(__a);
+}
+#endif
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_signextq(vector signed long long __a) {
+ return __builtin_altivec_vextsd2q(__a);
+}
+#endif
+
/* vec_signed */
static __inline__ vector signed int __ATTRS_o_ai
@@ -3288,6 +3458,66 @@ static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a,
}
#endif
+/* vec_dive */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_dive(vector signed int __a, vector signed int __b) {
+ return __builtin_altivec_vdivesw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_dive(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vdiveuw(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_dive(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vdivesd(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_dive(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vdiveud(__a, __b);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_dive(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vdiveuq(__a, __b);
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_dive(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vdivesq(__a, __b);
+}
+#endif
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_div(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a / __b;
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_div(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a / __b;
+}
+#endif /* __POWER10_VECTOR__ */
+
+/* vec_xvtdiv */
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_test_swdiv(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvtdivdp(__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_test_swdivs(vector float __a,
+ vector float __b) {
+ return __builtin_vsx_xvtdivsp(__a, __b);
+}
+#endif
+
/* vec_dss */
#define vec_dss __builtin_altivec_dss
@@ -3300,23 +3530,19 @@ static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) {
/* vec_dst */
#define vec_dst(__PTR, __CW, __STR) \
- __extension__( \
- { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
+ __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR))
/* vec_dstst */
#define vec_dstst(__PTR, __CW, __STR) \
- __extension__( \
- { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
+ __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR))
/* vec_dststt */
#define vec_dststt(__PTR, __CW, __STR) \
- __extension__( \
- { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
+ __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR))
/* vec_dstt */
#define vec_dstt(__PTR, __CW, __STR) \
- __extension__( \
- { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
+ __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR))
/* vec_eqv */
@@ -5467,6 +5693,16 @@ vec_msum(vector unsigned short __a, vector unsigned short __b,
return __builtin_altivec_vmsumuhm(__a, __b, __c);
}
+/* vec_msumc */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_msumc(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vmsumcud(__a, __b, __c);
+}
+#endif
+
/* vec_vmsummbm */
static __inline__ vector int __attribute__((__always_inline__))
@@ -5693,6 +5929,26 @@ vec_mule(vector unsigned int __a, vector unsigned int __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mule(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosd(__a, __b);
+#else
+ return __builtin_altivec_vmulesd(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mule(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloud(__a, __b);
+#else
+ return __builtin_altivec_vmuleud(__a, __b);
+#endif
+}
+#endif
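Vector element numbering is reversed between big- and little-endian PowerPC, so the programmer-visible even elements are the ones the ISA's odd-element instruction reads under LE; that is why vec_mule selects vmulosd/vmuloud there, and vec_mulo below mirrors the swap. For example, for two-element doubleword inputs:

/* Returns a0 * b0, the product of the even-numbered elements in the
   programmer's order, regardless of endianness. */
vector signed __int128 mule_ll(vector signed long long a,
                               vector signed long long b) {
  return vec_mule(a, b);
}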
+
/* vec_vmulesb */
static __inline__ vector short __attribute__((__always_inline__))
@@ -5737,6 +5993,30 @@ vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
#endif
}
+/* vec_mulh */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mulh(vector signed int __a, vector signed int __b) {
+ return __builtin_altivec_vmulhsw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vmulhuw(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mulh(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vmulhsd(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mulh(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vmulhud(__a, __b);
+}
+#endif
+
/* vec_mulo */
static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
@@ -5795,6 +6075,26 @@ vec_mulo(vector unsigned int __a, vector unsigned int __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mulo(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesd(__a, __b);
+#else
+ return __builtin_altivec_vmulosd(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mulo(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleud(__a, __b);
+#else
+ return __builtin_altivec_vmuloud(__a, __b);
+#endif
+}
+#endif
+
/* vec_vmulosb */
static __inline__ vector short __attribute__((__always_inline__))
@@ -7627,6 +7927,18 @@ vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) {
+  /* Rotate __a (not __b) left by __b; shift as unsigned so the sign
+     bits wrap logically. */
+  vector unsigned __int128 __ua = (vector unsigned __int128)__a;
+  return (vector signed __int128)(
+      (__ua << __b) |
+      (__ua >> ((__CHAR_BIT__ * sizeof(vector unsigned __int128)) - __b)));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_rl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (__a << __b) |
+         (__a >> ((__CHAR_BIT__ * sizeof(vector unsigned __int128)) - __b));
+}
+#endif
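A scalar sketch of the 128-bit rotate identity used above, assuming a compiler with unsigned __int128; the explicit zero check avoids the out-of-range complementary shift that the element-wise vector form leaves to the caller:

unsigned __int128 rotl128(unsigned __int128 v, unsigned s) {
  s &= 127;
  return s ? (v << s) | (v >> (128 - s)) : v;
}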
+
/* vec_rlmi */
#ifdef __POWER9_VECTOR__
static __inline__ vector unsigned int __ATTRS_o_ai
@@ -7640,8 +7952,24 @@ vec_rlmi(vector unsigned long long __a, vector unsigned long long __b,
vector unsigned long long __c) {
return __builtin_altivec_vrldmi(__a, __c, __b);
}
+#endif
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_rlmi(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vrlqmi(__a, __c, __b);
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_rlmi(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vrlqmi(__a, __c, __b);
+}
+#endif
/* vec_rlnm */
+#ifdef __POWER9_VECTOR__
static __inline__ vector unsigned int __ATTRS_o_ai
vec_rlnm(vector unsigned int __a, vector unsigned int __b,
vector unsigned int __c) {
@@ -7657,6 +7985,42 @@ vec_rlnm(vector unsigned long long __a, vector unsigned long long __b,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_rlnm(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ // Merge __b and __c using an appropriate shuffle.
+ vector unsigned char TmpB = (vector unsigned char)__b;
+ vector unsigned char TmpC = (vector unsigned char)__c;
+ vector unsigned char MaskAndShift =
+#ifdef __LITTLE_ENDIAN__
+ __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, -1, -1, -1, 16, 0,
+ 1, -1, -1, -1, -1, -1);
+#else
+ __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
+ -1, -1, -1, -1, -1, -1, -1);
+#endif
+  return __builtin_altivec_vrlqnm(__a,
+                                  (vector unsigned __int128)MaskAndShift);
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_rlnm(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ // Merge __b and __c using an appropriate shuffle.
+ vector unsigned char TmpB = (vector unsigned char)__b;
+ vector unsigned char TmpC = (vector unsigned char)__c;
+ vector unsigned char MaskAndShift =
+#ifdef __LITTLE_ENDIAN__
+ __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, -1, -1, -1, 16, 0,
+ 1, -1, -1, -1, -1, -1);
+#else
+ __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
+ -1, -1, -1, -1, -1, -1, -1);
+#endif
+  return __builtin_altivec_vrlqnm(__a,
+                                  (vector unsigned __int128)MaskAndShift);
+}
+#endif
+
/* vec_vrlb */
static __inline__ vector signed char __ATTRS_o_ai
@@ -7771,6 +8135,18 @@ vec_vrsqrtefp(vector float __a) {
return __builtin_altivec_vrsqrtefp(__a);
}
+/* vec_xvtsqrt */
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_test_swsqrt(vector double __a) {
+ return __builtin_vsx_xvtsqrtdp(__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_test_swsqrts(vector float __a) {
+ return __builtin_vsx_xvtsqrtsp(__a);
+}
+#endif
+
/* vec_sel */
#define __builtin_altivec_vsel_4si vec_sel
@@ -7905,6 +8281,46 @@ vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
((vector long long)__b & (vector long long)__c);
return (vector double)__res;
}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector bool long long __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector unsigned long long __c) {
+ return (__a & ~(vector bool long long)__c) |
+ (__b & (vector bool long long)__c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector bool long long __c) {
+ return (__a & ~(vector signed long long)__c) |
+ (__b & (vector signed long long)__c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector unsigned long long __c) {
+ return (__a & ~(vector signed long long)__c) |
+ (__b & (vector signed long long)__c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector bool long long __c) {
+ return (__a & ~(vector unsigned long long)__c) |
+ (__b & (vector unsigned long long)__c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (__a & ~__c) | (__b & __c);
+}
#endif
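
The new overloads keep vec_sel's bitwise-select semantics: each result bit
comes from __b where the corresponding bit of __c is 1, and from __a where
it is 0. A sketch (illustrative only; assumes VSX):

    vector unsigned long long
    select_bits(vector unsigned long long a, vector unsigned long long b,
                vector bool long long m) {
      return vec_sel(a, b, m); /* per bit: m ? b : a */
    }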
/* vec_vsel */
@@ -13900,6 +14316,18 @@ static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+}
+#endif
+
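
A sketch of the quadword predicates (illustrative only; assumes Power10).
The _p builtins test the CR6 field set by the vector compare:

    int all_equal(vector unsigned __int128 x, vector unsigned __int128 y) {
      return vec_all_eq(x, y); /* 1 iff every element compares equal */
    }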
/* vec_all_ge */
static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
@@ -14071,6 +14499,18 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __b, __a);
+}
+#endif
+
/* vec_all_gt */
static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
@@ -14242,6 +14682,18 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __a, __b);
+}
+#endif
+
/* vec_all_in */
static __inline__ int __attribute__((__always_inline__))
@@ -14421,6 +14873,18 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __a, __b);
+}
+#endif
+
/* vec_all_lt */
static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
@@ -14593,6 +15057,18 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __b, __a);
+}
+#endif
+
/* vec_all_nan */
static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
@@ -14797,6 +15273,18 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+}
+#endif
+
/* vec_all_nge */
static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
@@ -15042,6 +15530,18 @@ static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
/* vec_any_ge */
static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
@@ -15221,6 +15721,18 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __b, __a);
+}
+#endif
+
/* vec_any_gt */
static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
@@ -15400,6 +15912,18 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
/* vec_any_le */
static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
@@ -15579,6 +16103,18 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
/* vec_any_lt */
static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
@@ -15758,6 +16294,18 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
/* vec_any_nan */
static __inline__ int __attribute__((__always_inline__))
@@ -15953,6 +16501,18 @@ static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
}
#endif
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
+ vector unsigned __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
/* vec_any_nge */
static __inline__ int __attribute__((__always_inline__))
@@ -16353,41 +16913,41 @@ typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
typedef vector float unaligned_vec_float __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
- signed char *__ptr) {
+ const signed char *__ptr) {
return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
-vec_xl(signed long long __offset, unsigned char *__ptr) {
+vec_xl(signed long long __offset, const unsigned char *__ptr) {
return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
- signed short *__ptr) {
+ const signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sshort *)__addr;
}
static inline __ATTRS_o_ai vector unsigned short
-vec_xl(signed long long __offset, unsigned short *__ptr) {
+vec_xl(signed long long __offset, const unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ushort *)__addr;
}
static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
- signed int *__ptr) {
+ const signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sint *)__addr;
}
static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
- unsigned int *__ptr) {
+ const unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_uint *)__addr;
}
static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
- float *__ptr) {
+ const float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_float *)__addr;
}
@@ -16398,19 +16958,19 @@ typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
typedef vector double unaligned_vec_double __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed long long
-vec_xl(signed long long __offset, signed long long *__ptr) {
+vec_xl(signed long long __offset, const signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sll *)__addr;
}
static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(signed long long __offset, unsigned long long *__ptr) {
+vec_xl(signed long long __offset, const unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ull *)__addr;
}
static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
- double *__ptr) {
+ const double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_double *)__addr;
}
@@ -16421,13 +16981,13 @@ typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
typedef vector unsigned __int128 unaligned_vec_ui128
__attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
-vec_xl(signed long long __offset, signed __int128 *__ptr) {
+vec_xl(signed long long __offset, const signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_si128 *)__addr;
}
static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
+vec_xl(signed long long __offset, const unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ui128 *)__addr;
}
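
With the const-qualified overloads above, vec_xl can now be called on
pointers to const data without a cast. A sketch (illustrative only):

    vector signed int load_head(const signed int *p) {
      return vec_xl(0, p); /* unaligned 16-byte load from p */
    }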
@@ -16437,71 +16997,71 @@ vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
#ifdef __LITTLE_ENDIAN__
static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed char *__ptr) {
+vec_xl_be(signed long long __offset, const signed char *__ptr) {
vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
13, 12, 11, 10, 9, 8);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned char *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned char *__ptr) {
vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
13, 12, 11, 10, 9, 8);
}
static __inline__ vector signed short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed short *__ptr) {
+vec_xl_be(signed long long __offset, const signed short *__ptr) {
vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned short *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned short *__ptr) {
vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
}
static __inline__ vector signed int __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed int *__ptr) {
+vec_xl_be(signed long long __offset, const signed int *__ptr) {
return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned int *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned int *__ptr) {
return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
}
static __inline__ vector float __ATTRS_o_ai
-vec_xl_be(signed long long __offset, float *__ptr) {
+vec_xl_be(signed long long __offset, const float *__ptr) {
return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
}
#ifdef __VSX__
static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed long long *__ptr) {
+vec_xl_be(signed long long __offset, const signed long long *__ptr) {
return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned long long *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned long long *__ptr) {
return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
}
static __inline__ vector double __ATTRS_o_ai
-vec_xl_be(signed long long __offset, double *__ptr) {
+vec_xl_be(signed long long __offset, const double *__ptr) {
return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
}
#endif
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed __int128 *__ptr) {
+vec_xl_be(signed long long __offset, const signed __int128 *__ptr) {
return vec_xl(__offset, __ptr);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned __int128 *__ptr) {
return vec_xl(__offset, __ptr);
}
#endif
@@ -16509,6 +17069,54 @@ vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
#define vec_xl_be vec_xl
#endif
+#if defined(__POWER10_VECTOR__) && defined(__VSX__)
+
+/* vec_xl_sext */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed char *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed short *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed int *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed long long *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+/* vec_xl_zext */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned char *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned short *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned int *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned long long *__pointer) {
+ return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+#endif
+
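
A sketch of the widening loads (illustrative only; assumes Power10). Note
that, as implemented above, the offset is scaled by the pointee type rather
than being a byte offset:

    vector unsigned __int128 widen_third(const signed int *p) {
      return vec_xl_sext(2, p); /* p[2], sign-extended to 128 bits */
    }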
/* vec_xst */
static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
@@ -16597,6 +17205,58 @@ static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
}
#endif
+/* vec_xst_trunc */
+
+#if defined(__POWER10_VECTOR__) && defined(__VSX__)
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+ signed long long __offset,
+ signed char *__ptr) {
+ *(__ptr + __offset) = (signed char)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+ signed long long __offset,
+ unsigned char *__ptr) {
+ *(__ptr + __offset) = (unsigned char)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+ signed long long __offset,
+ signed short *__ptr) {
+ *(__ptr + __offset) = (signed short)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+ signed long long __offset,
+ unsigned short *__ptr) {
+ *(__ptr + __offset) = (unsigned short)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+ signed long long __offset,
+ signed int *__ptr) {
+ *(__ptr + __offset) = (signed int)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+ signed long long __offset,
+ unsigned int *__ptr) {
+ *(__ptr + __offset) = (unsigned int)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+ signed long long __offset,
+ signed long long *__ptr) {
+ *(__ptr + __offset) = (signed long long)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+ signed long long __offset,
+ unsigned long long *__ptr) {
+ *(__ptr + __offset) = (unsigned long long)__vec[0];
+}
+#endif
+
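
A sketch of the truncating stores (illustrative only; assumes Power10). The
low element of the quadword vector is truncated to the pointee type:

    void store_low_byte(vector unsigned __int128 v, unsigned char *p) {
      vec_xst_trunc(v, 0, p); /* *p = (unsigned char)v[0] */
    }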
/* vec_xst_be */
#ifdef __LITTLE_ENDIAN__
@@ -16763,6 +17423,100 @@ static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
}
#ifdef __POWER10_VECTOR__
+
+/* vec_extractm */
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned char __a) {
+ return __builtin_altivec_vextractbm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned short __a) {
+ return __builtin_altivec_vextracthm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned int __a) {
+ return __builtin_altivec_vextractwm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned long long __a) {
+ return __builtin_altivec_vextractdm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned __int128 __a) {
+ return __builtin_altivec_vextractqm(__a);
+}
+
+/* vec_expandm */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_expandm(vector unsigned char __a) {
+ return __builtin_altivec_vexpandbm(__a);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_expandm(vector unsigned short __a) {
+ return __builtin_altivec_vexpandhm(__a);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_expandm(vector unsigned int __a) {
+ return __builtin_altivec_vexpandwm(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_expandm(vector unsigned long long __a) {
+ return __builtin_altivec_vexpanddm(__a);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_expandm(vector unsigned __int128 __a) {
+ return __builtin_altivec_vexpandqm(__a);
+}
+
+/* vec_cntm */
+
+#define vec_cntm(__a, __mp) \
+ _Generic((__a), vector unsigned char \
+ : __builtin_altivec_vcntmbb((__a), (unsigned int)(__mp)), \
+ vector unsigned short \
+ : __builtin_altivec_vcntmbh((__a), (unsigned int)(__mp)), \
+ vector unsigned int \
+ : __builtin_altivec_vcntmbw((__a), (unsigned int)(__mp)), \
+ vector unsigned long long \
+ : __builtin_altivec_vcntmbd((__a), (unsigned int)(__mp)))
+
+/* vec_gen[b|h|w|d|q]m */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_genbm(unsigned long long __bm) {
+ return __builtin_altivec_mtvsrbm(__bm);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_genhm(unsigned long long __bm) {
+ return __builtin_altivec_mtvsrhm(__bm);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_genwm(unsigned long long __bm) {
+ return __builtin_altivec_mtvsrwm(__bm);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_gendm(unsigned long long __bm) {
+ return __builtin_altivec_mtvsrdm(__bm);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_genqm(unsigned long long __bm) {
+ return __builtin_altivec_mtvsrqm(__bm);
+}
+
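
A sketch of the mask-manipulation intrinsics (illustrative only; assumes
Power10). vec_extractm gathers the most-significant bit of each element into
a scalar mask; vec_gen[b|h|w|d|q]m performs the inverse expansion:

    unsigned int sign_mask(vector unsigned char v) {
      return vec_extractm(v); /* bit i = MSB of element i */
    }

    vector unsigned char from_mask(unsigned long long m) {
      return vec_genbm(m); /* element i = bit i of m ? 0xFF : 0x00 */
    }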
/* vec_pdep */
static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -16881,6 +17635,38 @@ vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
return __builtin_altivec_vctzdm(__a, __b);
}
+/* vec_mod */
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mod(vector signed int __a, vector signed int __b) {
+ return __a % __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mod(vector unsigned int __a, vector unsigned int __b) {
+ return __a % __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mod(vector signed long long __a, vector signed long long __b) {
+ return __a % __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mod(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a % __b;
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mod(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a % __b;
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a % __b;
+}
+
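
vec_mod lowers to the element-wise % operator, which the backend can match
to the Power10 vector modulo instructions. A sketch (illustrative only):

    vector signed int wrap(vector signed int i, vector signed int n) {
      return vec_mod(i, n); /* i % n, element-wise */
    }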
/* vec_sldbi */
#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
@@ -17027,6 +17813,92 @@ vec_inserth(vector unsigned int __a, vector unsigned int __b,
#endif
}
+/* vec_extractl */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+ vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextdubvrx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextdubvlx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+ vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextduhvrx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextduhvlx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+ vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextduwvrx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextduwvlx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extractl(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextddvrx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextddvlx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+/* vec_extracth */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+ vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextdubvlx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextdubvrx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+ vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextduhvlx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextduhvrx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+ vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextduwvlx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextduwvrx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vextddvlx(__a, __b, __c);
+#else
+ vector unsigned long long __ret = __builtin_altivec_vextddvrx(__a, __b, __c);
+ return vec_sld(__ret, __ret, 8);
+#endif
+}
+
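
A sketch of the doubleword extraction helpers (illustrative only; assumes
Power10). Both pull a doubleword out of the concatenation of the two source
vectors, with the endian-dependent lowering handled above:

    vector unsigned long long
    extract_low(vector unsigned char a, vector unsigned char b,
                unsigned int idx) {
      return vec_extractl(a, b, idx);
    }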
#ifdef __VSX__
/* vec_permx */
@@ -17095,6 +17967,14 @@ vec_blendv(vector double __a, vector double __b,
return __builtin_vsx_xxblendvd(__a, __b, __c);
}
+/* vec_replace_elt */
+
+#define vec_replace_elt __builtin_altivec_vec_replace_elt
+
+/* vec_replace_unaligned */
+
+#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+
/* vec_splati */
#define vec_splati(__a) \
@@ -17161,6 +18041,197 @@ vec_test_lsbb_all_zeros(vector unsigned char __a) {
return __builtin_vsx_xvtlsbb(__a, 0);
}
#endif /* __VSX__ */
+
+/* vec_stril */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_stril(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribr((vector signed char)__a);
+#else
+ return __builtin_altivec_vstribl((vector signed char)__a);
+#endif
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_stril(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribr(__a);
+#else
+ return __builtin_altivec_vstribl(__a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_stril(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihr((vector signed short)__a);
+#else
+ return __builtin_altivec_vstrihl((vector signed short)__a);
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_stril(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihr(__a);
+#else
+ return __builtin_altivec_vstrihl(__a);
+#endif
+}
+
+/* vec_stril_p */
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+#else
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+#else
+ return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
+#else
+ return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
+#else
+ return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
+#endif
+}
+
+/* vec_strir */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_strir(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribl((vector signed char)__a);
+#else
+ return __builtin_altivec_vstribr((vector signed char)__a);
+#endif
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_strir(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribl(__a);
+#else
+ return __builtin_altivec_vstribr(__a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_strir(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihl((vector signed short)__a);
+#else
+ return __builtin_altivec_vstrihr((vector signed short)__a);
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_strir(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihl(__a);
+#else
+ return __builtin_altivec_vstrihr(__a);
+#endif
+}
+
+/* vec_strir_p */
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+#else
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+#else
+ return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
+#else
+ return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
+#else
+ return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
+#endif
+}
+
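
A sketch of the string-isolate intrinsics (illustrative only; assumes
Power10). vec_stril zeroes every element after the first zero element, and
the _p form reports whether a zero element was found:

    int has_terminator(vector unsigned char s) {
      return vec_stril_p(s); /* nonzero if s contains a zero byte */
    }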
+/* vec_s[l | r | ra] */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+ __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sl(vector signed __int128 __a, vector unsigned __int128 __b) {
+ return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+ __CHAR_BIT__));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sr(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+ __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sr(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return (vector signed __int128)(
+      (vector unsigned __int128)__a >>
+      (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                        __CHAR_BIT__)));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sra(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector unsigned __int128)(
+      (vector signed __int128)__a >>
+      (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                        __CHAR_BIT__)));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
+ return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+ __CHAR_BIT__));
+}
+
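
The quadword shifts take the shift amount modulo the 128-bit element width,
mirroring the narrower overloads. A sketch (illustrative only; assumes
Power10):

    vector unsigned __int128 shl1(vector unsigned __int128 v) {
      return vec_sl(v, (vector unsigned __int128){1});
    }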
#endif /* __POWER10_VECTOR__ */
#undef __ATTRS_o_ai
diff --git a/clang/lib/Headers/amxintrin.h b/clang/lib/Headers/amxintrin.h
index 58254e21c81a..823c7ca1f076 100644
--- a/clang/lib/Headers/amxintrin.h
+++ b/clang/lib/Headers/amxintrin.h
@@ -15,8 +15,8 @@
#define __AMXINTRIN_H
#ifdef __x86_64__
-#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
+#define __DEFAULT_FN_ATTRS_TILE \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
/// Load tile configuration from a 64-byte memory location specified by
/// "mem_addr". The tile configuration includes the tile type palette, the
@@ -31,9 +31,8 @@
///
/// \param __config
 /// A pointer to the 512-bit configuration
-static __inline__ void __DEFAULT_FN_ATTRS
-_tile_loadconfig(const void *__config)
-{
+static __inline__ void __DEFAULT_FN_ATTRS_TILE
+_tile_loadconfig(const void *__config) {
__builtin_ia32_tile_loadconfig(__config);
}
@@ -48,9 +47,8 @@ _tile_loadconfig(const void *__config)
///
/// \param __config
 /// A pointer to the 512-bit configuration
-static __inline__ void __DEFAULT_FN_ATTRS
-_tile_storeconfig(void *__config)
-{
+static __inline__ void __DEFAULT_FN_ATTRS_TILE
+_tile_storeconfig(void *__config) {
__builtin_ia32_tile_storeconfig(__config);
}
@@ -60,9 +58,7 @@ _tile_storeconfig(void *__config)
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
-static __inline__ void __DEFAULT_FN_ATTRS
-_tile_release(void)
-{
+static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
__builtin_ia32_tilerelease();
}
@@ -80,8 +76,9 @@ _tile_release(void)
/// A pointer to base address.
/// \param stride
/// The stride between the rows' data to be loaded in memory.
-#define _tile_loadd(dst, base, stride) \
- __builtin_ia32_tileloadd64((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+#define _tile_loadd(dst, base, stride) \
+ __builtin_ia32_tileloadd64((dst), ((const void *)(base)), \
+ (__SIZE_TYPE__)(stride))
 /// Load tile rows from memory specified by "base" address and "stride" into
/// destination tile "dst" using the tile configuration previously configured
@@ -99,8 +96,9 @@ _tile_release(void)
/// A pointer to base address.
/// \param stride
/// The stride between the rows' data to be loaded in memory.
-#define _tile_stream_loadd(dst, base, stride) \
- __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+#define _tile_stream_loadd(dst, base, stride) \
+ __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), \
+ (__SIZE_TYPE__)(stride))
 /// Store the tile specified by "src" to memory specified by "base" address and
/// "stride" using the tile configuration previously configured via
@@ -116,7 +114,7 @@ _tile_release(void)
/// A pointer to base address.
/// \param stride
/// The stride between the rows' data to be stored in memory.
-#define _tile_stored(dst, base, stride) \
+#define _tile_stored(dst, base, stride) \
__builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))
/// Zero the tile specified by "tdest".
@@ -145,7 +143,8 @@ _tile_release(void)
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbssd(dst, src0, src1) __builtin_ia32_tdpbssd((dst), (src0), (src1))
+#define _tile_dpbssd(dst, src0, src1) \
+ __builtin_ia32_tdpbssd((dst), (src0), (src1))
/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
@@ -163,7 +162,8 @@ _tile_release(void)
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbsud(dst, src0, src1) __builtin_ia32_tdpbsud((dst), (src0), (src1))
+#define _tile_dpbsud(dst, src0, src1) \
+ __builtin_ia32_tdpbsud((dst), (src0), (src1))
/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
@@ -181,7 +181,8 @@ _tile_release(void)
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbusd(dst, src0, src1) __builtin_ia32_tdpbusd((dst), (src0), (src1))
+#define _tile_dpbusd(dst, src0, src1) \
+ __builtin_ia32_tdpbusd((dst), (src0), (src1))
/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
@@ -199,7 +200,8 @@ _tile_release(void)
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbuud(dst, src0, src1) __builtin_ia32_tdpbuud((dst), (src0), (src1))
+#define _tile_dpbuud(dst, src0, src1) \
+ __builtin_ia32_tdpbuud((dst), (src0), (src1))
/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
/// src1, accumulating the intermediate single-precision (32-bit) floating-point
@@ -216,10 +218,61 @@ _tile_release(void)
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
-#define _tile_dpbf16ps(dst, src0, src1) \
+#define _tile_dpbf16ps(dst, src0, src1) \
__builtin_ia32_tdpbf16ps((dst), (src0), (src1))
-#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS_INT8 \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-int8")))
+
+typedef int _tile1024i __attribute__((__vector_size__(1024), __aligned__(64)));
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_loadd_internal(unsigned short m, unsigned short n, const void *base,
+ __SIZE_TYPE__ stride) {
+ return __builtin_ia32_tileloadd64_internal(m, n, base,
+ (__SIZE_TYPE__)(stride));
+}
+
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbssd_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tdpbssd_internal(m, n, k, dst, src1, src2);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS_INT8
+_tile_stored_internal(unsigned short m, unsigned short n, void *base,
+ __SIZE_TYPE__ stride, _tile1024i tile) {
+ return __builtin_ia32_tilestored64_internal(m, n, base,
+ (__SIZE_TYPE__)(stride), tile);
+}
+
+typedef struct __tile1024i_str {
+ const unsigned short row;
+ const unsigned short col;
+ _tile1024i tile;
+} __tile1024i;
+
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_loadd(__tile1024i *dst, const void *base,
+ __SIZE_TYPE__ stride) {
+ dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride);
+}
+
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbssd(__tile1024i *dst, __tile1024i src1,
+ __tile1024i src2) {
+ dst->tile = _tile_dpbssd_internal(src1.row, src2.col, src1.col, dst->tile,
+ src1.tile, src2.tile);
+}
+
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_stored(void *base, __SIZE_TYPE__ stride, __tile1024i src) {
+ _tile_stored_internal(src.row, src.col, base, stride, src.tile);
+}
+
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_zero(__tile1024i *dst) {
+ dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col);
+}
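
A sketch of the new C tile API (illustrative only; assumes an AMX-enabled
x86-64 target and that a matching tile palette has already been programmed
with _tile_loadconfig). The row/col fields give each tile's shape as rows
and bytes per row:

    #include <immintrin.h>

    static void dpbssd_16x16(const void *a, const void *b, void *c,
                             __SIZE_TYPE__ stride) {
      __tile1024i ta = {16, 64}; /* 16 rows x 64 bytes */
      __tile1024i tb = {16, 64};
      __tile1024i tc = {16, 64};
      __tile_loadd(&ta, a, stride);
      __tile_loadd(&tb, b, stride);
      __tile_zero(&tc);
      __tile_dpbssd(&tc, ta, tb); /* tc += signed int8 dot-products */
      __tile_stored(c, stride, tc);
    }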
#endif /* __x86_64__ */
#endif /* __AMXINTRIN_H */
diff --git a/clang/lib/Headers/arm_acle.h b/clang/lib/Headers/arm_acle.h
index de568b4ff9c5..c156d89c1f84 100644
--- a/clang/lib/Headers/arm_acle.h
+++ b/clang/lib/Headers/arm_acle.h
@@ -639,6 +639,32 @@ __jcvt(double __a) {
}
#endif
+/* Armv8.7-A load/store 64-byte intrinsics */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
+typedef struct {
+ uint64_t val[8];
+} data512_t;
+
+static __inline__ data512_t __attribute__((__always_inline__, __nodebug__))
+__arm_ld64b(const void *__addr) {
+ data512_t __value;
+ __builtin_arm_ld64b(__addr, __value.val);
+ return __value;
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__arm_st64b(void *__addr, data512_t __value) {
+ __builtin_arm_st64b(__addr, __value.val);
+}
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__arm_st64bv(void *__addr, data512_t __value) {
+ return __builtin_arm_st64bv(__addr, __value.val);
+}
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__arm_st64bv0(void *__addr, data512_t __value) {
+ return __builtin_arm_st64bv0(__addr, __value.val);
+}
+#endif
+
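
A sketch that copies one 64-byte granule with the LS64 intrinsics above
(illustrative only; assumes Armv8.7-A with FEAT_LS64, and note that the
architecture restricts the memory types these accesses may target):

    static inline void copy64(void *dst, const void *src) {
      data512_t block = __arm_ld64b(src);
      __arm_st64b(dst, block);
    }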
/* 10.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index fa22ef3fdd18..2ee4350b14d4 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -9305,295 +9305,218 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
* This takes log2(n) steps where n is the number of elements in the vector.
*/
-#define _mm512_mask_reduce_operator(op) \
- __v4du __t1 = (__v4du)_mm512_extracti64x4_epi64(__W, 0); \
- __v4du __t2 = (__v4du)_mm512_extracti64x4_epi64(__W, 1); \
- __m256i __t3 = (__m256i)(__t1 op __t2); \
- __v2du __t4 = (__v2du)_mm256_extracti128_si256(__t3, 0); \
- __v2du __t5 = (__v2du)_mm256_extracti128_si256(__t3, 1); \
- __v2du __t6 = __t4 op __t5; \
- __v2du __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
- __v2du __t8 = __t6 op __t7; \
- return __t8[0]
-
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_add_q512(__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_mul_q512(__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
- _mm512_mask_reduce_operator(&);
+ return __builtin_ia32_reduce_and_q512(__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
- _mm512_mask_reduce_operator(|);
+ return __builtin_ia32_reduce_or_q512(__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_add_q512(__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_mul_q512(__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
- _mm512_mask_reduce_operator(&);
+ return __builtin_ia32_reduce_and_q512(__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- _mm512_mask_reduce_operator(|);
+ return __builtin_ia32_reduce_or_q512(__W);
}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
- __m256d __t1 = _mm512_extractf64x4_pd(__W, 0); \
- __m256d __t2 = _mm512_extractf64x4_pd(__W, 1); \
- __m256d __t3 = __t1 op __t2; \
- __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
- __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
- __m128d __t6 = __t4 op __t5; \
- __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
- __m128d __t8 = __t6 op __t7; \
- return __t8[0]
static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_fadd_pd512(0.0, __W);
}
static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) {
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
}
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
__W = _mm512_maskz_mov_pd(__M, __W);
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_fadd_pd512(0.0, __W);
}
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
__W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W);
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
- __v8su __t1 = (__v8su)_mm512_extracti64x4_epi64(__W, 0); \
- __v8su __t2 = (__v8su)_mm512_extracti64x4_epi64(__W, 1); \
- __m256i __t3 = (__m256i)(__t1 op __t2); \
- __v4su __t4 = (__v4su)_mm256_extracti128_si256(__t3, 0); \
- __v4su __t5 = (__v4su)_mm256_extracti128_si256(__t3, 1); \
- __v4su __t6 = __t4 op __t5; \
- __v4su __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
- __v4su __t8 = __t6 op __t7; \
- __v4su __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
- __v4su __t10 = __t8 op __t9; \
- return __t10[0]
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_add_epi32(__m512i __W) {
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_add_d512((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_mul_epi32(__m512i __W) {
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_mul_d512((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_and_epi32(__m512i __W) {
- _mm512_mask_reduce_operator(&);
+ return __builtin_ia32_reduce_and_d512((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_or_epi32(__m512i __W) {
- _mm512_mask_reduce_operator(|);
+ return __builtin_ia32_reduce_or_d512((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_add_d512((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_mul_d512((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
- _mm512_mask_reduce_operator(&);
+ return __builtin_ia32_reduce_and_d512((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- _mm512_mask_reduce_operator(|);
+ return __builtin_ia32_reduce_or_d512((__v16si)__W);
}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
- __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 0); \
- __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 1); \
- __m256 __t3 = __t1 op __t2; \
- __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
- __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
- __m128 __t6 = __t4 op __t5; \
- __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
- __m128 __t8 = __t6 op __t7; \
- __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
- __m128 __t10 = __t8 op __t9; \
- return __t10[0]
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_reduce_add_ps(__m512 __W) {
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_fadd_ps512(0.0f, __W);
}
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_reduce_mul_ps(__m512 __W) {
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
}
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
__W = _mm512_maskz_mov_ps(__M, __W);
- _mm512_mask_reduce_operator(+);
+ return __builtin_ia32_reduce_fadd_ps512(0.0f, __W);
}
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
__W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W);
- _mm512_mask_reduce_operator(*);
+ return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
- __m512i __t1 = (__m512i)__builtin_shufflevector((__v8di)__V, (__v8di)__V, 4, 5, 6, 7, 0, 1, 2, 3); \
- __m512i __t2 = _mm512_##op(__V, __t1); \
- __m512i __t3 = (__m512i)__builtin_shufflevector((__v8di)__t2, (__v8di)__t2, 2, 3, 0, 1, 6, 7, 4, 5); \
- __m512i __t4 = _mm512_##op(__t2, __t3); \
- __m512i __t5 = (__m512i)__builtin_shufflevector((__v8di)__t4, (__v8di)__t4, 1, 0, 3, 2, 5, 4, 7, 6); \
- __v8di __t6 = (__v8di)_mm512_##op(__t4, __t5); \
- return __t6[0]
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi64(__m512i __V) {
- _mm512_mask_reduce_operator(max_epi64);
+ return __builtin_ia32_reduce_smax_q512(__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu64(__m512i __V) {
- _mm512_mask_reduce_operator(max_epu64);
+ return __builtin_ia32_reduce_umax_q512(__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi64(__m512i __V) {
- _mm512_mask_reduce_operator(min_epi64);
+ return __builtin_ia32_reduce_smin_q512(__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu64(__m512i __V) {
- _mm512_mask_reduce_operator(min_epu64);
+ return __builtin_ia32_reduce_umin_q512(__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
- _mm512_mask_reduce_operator(max_epi64);
+ return __builtin_ia32_reduce_smax_q512(__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi64(__M, __V);
- _mm512_mask_reduce_operator(max_epu64);
+ return __builtin_ia32_reduce_umax_q512(__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
- _mm512_mask_reduce_operator(min_epi64);
+ return __builtin_ia32_reduce_smin_q512(__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
- _mm512_mask_reduce_operator(min_epu64);
+ return __builtin_ia32_reduce_umin_q512(__V);
}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
- __m256i __t1 = _mm512_extracti64x4_epi64(__V, 0); \
- __m256i __t2 = _mm512_extracti64x4_epi64(__V, 1); \
- __m256i __t3 = _mm256_##op(__t1, __t2); \
- __m128i __t4 = _mm256_extracti128_si256(__t3, 0); \
- __m128i __t5 = _mm256_extracti128_si256(__t3, 1); \
- __m128i __t6 = _mm_##op(__t4, __t5); \
- __m128i __t7 = (__m128i)__builtin_shufflevector((__v4si)__t6, (__v4si)__t6, 2, 3, 0, 1); \
- __m128i __t8 = _mm_##op(__t6, __t7); \
- __m128i __t9 = (__m128i)__builtin_shufflevector((__v4si)__t8, (__v4si)__t8, 1, 0, 3, 2); \
- __v4si __t10 = (__v4si)_mm_##op(__t8, __t9); \
- return __t10[0]
-
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi32(__m512i __V) {
- _mm512_mask_reduce_operator(max_epi32);
+ return __builtin_ia32_reduce_smax_d512((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu32(__m512i __V) {
- _mm512_mask_reduce_operator(max_epu32);
+ return __builtin_ia32_reduce_umax_d512((__v16si)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi32(__m512i __V) {
- _mm512_mask_reduce_operator(min_epi32);
+ return __builtin_ia32_reduce_smin_d512((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu32(__m512i __V) {
- _mm512_mask_reduce_operator(min_epu32);
+ return __builtin_ia32_reduce_umin_d512((__v16si)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
- _mm512_mask_reduce_operator(max_epi32);
+ return __builtin_ia32_reduce_smax_d512((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi32(__M, __V);
- _mm512_mask_reduce_operator(max_epu32);
+ return __builtin_ia32_reduce_umax_d512((__v16si)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
- _mm512_mask_reduce_operator(min_epi32);
+ return __builtin_ia32_reduce_smin_d512((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
- _mm512_mask_reduce_operator(min_epu32);
+ return __builtin_ia32_reduce_umin_d512((__v16si)__V);
}
-#undef _mm512_mask_reduce_operator
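
Call sites are unchanged by the rewrite; the reductions simply lower to
single builtins now instead of an unrolled shuffle tree. A sketch
(illustrative only; assumes an AVX-512F target):

    static inline int hsum16(__m512i v) {
      return _mm512_reduce_add_epi32(v); /* horizontal add of 16 ints */
    }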
#define _mm512_mask_reduce_operator(op) \
__m256d __t1 = _mm512_extractf64x4_pd(__V, 0); \
diff --git a/clang/lib/Headers/avx512vlvnniintrin.h b/clang/lib/Headers/avx512vlvnniintrin.h
index b7c8fa08c653..71ac1b4370d4 100644
--- a/clang/lib/Headers/avx512vlvnniintrin.h
+++ b/clang/lib/Headers/avx512vlvnniintrin.h
@@ -18,13 +18,157 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(256)))
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
+/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a S, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
+/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+#define _mm256_dpbusd_epi32(S, A, B) \
+ (__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B))
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_vpdpbusd256((__v8si)__S, (__v8si)__A,
- (__v8si)__B);
-}
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
+/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a S using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
+/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+#define _mm256_dpbusds_epi32(S, A, B) \
+ (__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
+/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a S,
+/// and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
+/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
+/// DST.dword[j] := S.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+#define _mm256_dpwssd_epi32(S, A, B) \
+ (__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
+/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a S
+/// using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
+/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
+/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+#define _mm256_dpwssds_epi32(S, A, B) \
+ (__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
+/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a S, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
+/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+#define _mm_dpbusd_epi32(S, A, B) \
+ (__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
+/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a S using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3]))
+/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+#define _mm_dpbusds_epi32(S, A, B) \
+ (__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
+/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a S,
+/// and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
+/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
+/// DST.dword[j] := S.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+#define _mm_dpwssd_epi32(S, A, B) \
+ (__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
+/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a S
+/// using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
+/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
+/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+#define _mm_dpwssds_epi32(S, A, B) \
+ (__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B))
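
[Editor's note: an illustrative usage sketch, not part of this patch. It shows the new unmasked VNNI macro form, assuming a target compiled with -mavx512vl -mavx512vnni; the helper name is hypothetical.]

#include <immintrin.h>

__m256i dot_accumulate(__m256i acc, __m256i u8s, __m256i s8s)
{
  /* Each 32-bit lane of the result is acc.dword[j] plus the sum of the
     four byte products u8s.byte[4*j+k] * s8s.byte[4*j+k], k = 0..3,
     where u8s bytes are treated as unsigned and s8s bytes as signed. */
  return _mm256_dpbusd_epi32(acc, u8s, s8s);
}
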
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
@@ -43,13 +187,6 @@ _mm256_maskz_dpbusd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_vpdpbusds256((__v8si)__S, (__v8si)__A,
- (__v8si)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbusds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256(__U,
@@ -66,13 +203,6 @@ _mm256_maskz_dpbusds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_vpdpwssd256((__v8si)__S, (__v8si)__A,
- (__v8si)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwssd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256(__U,
@@ -89,13 +219,6 @@ _mm256_maskz_dpwssd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_vpdpwssds256((__v8si)__S, (__v8si)__A,
- (__v8si)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwssds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256(__U,
@@ -112,13 +235,6 @@ _mm256_maskz_dpwssds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_vpdpbusd128((__v4si)__S, (__v4si)__A,
- (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbusd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128(__U,
@@ -135,13 +251,6 @@ _mm_maskz_dpbusd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_vpdpbusds128((__v4si)__S, (__v4si)__A,
- (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbusds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128(__U,
@@ -158,13 +267,6 @@ _mm_maskz_dpbusds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_vpdpwssd128((__v4si)__S, (__v4si)__A,
- (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwssd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128(__U,
@@ -181,13 +283,6 @@ _mm_maskz_dpwssd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_vpdpwssds128((__v4si)__S, (__v4si)__A,
- (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwssds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128(__U,
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index 84421bf1b9d5..382b6215751e 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -2245,7 +2245,7 @@ _mm256_cvttps_epi32(__m256 __a)
/// Returns the first element of the input vector of [4 x double].
///
-/// \headerfile <avxintrin.h>
+/// \headerfile <x86intrin.h>
///
/// This intrinsic is a utility function and does not correspond to a specific
/// instruction.
@@ -2261,7 +2261,7 @@ _mm256_cvtsd_f64(__m256d __a)
/// Returns the first element of the input vector of [8 x i32].
///
-/// \headerfile <avxintrin.h>
+/// \headerfile <x86intrin.h>
///
/// This intrinsic is a utility function and does not correspond to a specific
/// instruction.
@@ -2278,7 +2278,7 @@ _mm256_cvtsi256_si32(__m256i __a)
/// Returns the first element of the input vector of [8 x float].
///
-/// \headerfile <avxintrin.h>
+/// \headerfile <x86intrin.h>
///
/// This intrinsic is a utility function and does not correspond to a specific
/// instruction.
diff --git a/clang/lib/Headers/avxvnniintrin.h b/clang/lib/Headers/avxvnniintrin.h
new file mode 100644
index 000000000000..ad45cb7962e5
--- /dev/null
+++ b/clang/lib/Headers/avxvnniintrin.h
@@ -0,0 +1,225 @@
+/*===--------------- avxvnniintrin.h - VNNI intrinsics --------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avxvnniintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVXVNNIINTRIN_H
+#define __AVXVNNIINTRIN_H
+
+/* The intrinsics below, defined in avx512vlvnniintrin.h, can also be used for AVXVNNI. */
+/// \fn __m256i _mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B)
+/// \fn __m256i _mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B)
+/// \fn __m256i _mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B)
+/// \fn __m256i _mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B)
+/// \fn __m128i _mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B)
+/// \fn __m128i _mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B)
+/// \fn __m128i _mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B)
+/// \fn __m128i _mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B)
+
+/* Intrinsics with the _avx_ infix in their names are for compatibility with MSVC. */
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avxvnni"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avxvnni"), __min_vector_width__(128)))
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a __S, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))
+/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_vpdpbusd256((__v8si)__S, (__v8si)__A, (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a __S using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))
+/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_vpdpbusds256((__v8si)__S, (__v8si)__A, (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S,
+/// and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_vpdpwssd256((__v8si)__S, (__v8si)__A, (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S
+/// using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// DST[MAX:256] := 0
+/// \endoperation
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_vpdpwssds256((__v8si)__S, (__v8si)__A, (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a __S, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))
+/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpdpbusd128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a __S using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))
+/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpdpbusds128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S,
+/// and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpdpwssd128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S
+/// using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpdpwssds128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXVNNIINTRIN_H
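
[Editor's note: an illustrative usage sketch, not part of this patch. The _avx_-infixed forms target plain AVXVNNI hardware with no AVX-512 requirement, assuming compilation with -mavxvnni; the helper name is hypothetical.]

#include <immintrin.h>

__m128i dot_accumulate_sat(__m128i acc, __m128i u8s, __m128i s8s)
{
  /* Saturating variant: each 32-bit lane saturates on overflow
     instead of wrapping. */
  return _mm_dpbusds_avx_epi32(acc, u8s, s8s);
}
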
diff --git a/clang/lib/Headers/cpuid.h b/clang/lib/Headers/cpuid.h
index 2a88c042d046..34f0e76807c5 100644
--- a/clang/lib/Headers/cpuid.h
+++ b/clang/lib/Headers/cpuid.h
@@ -7,6 +7,9 @@
*===-----------------------------------------------------------------------===
*/
+#ifndef __CPUID_H
+#define __CPUID_H
+
#if !(__x86_64__ || __i386__)
#error this header is for x86 only
#endif
@@ -186,6 +189,7 @@
/* Features in %edx for leaf 7 sub-leaf 0 */
#define bit_AVX5124VNNIW 0x00000004
#define bit_AVX5124FMAPS 0x00000008
+#define bit_UINTR 0x00000020
#define bit_SERIALIZE 0x00004000
#define bit_TSXLDTRK 0x00010000
#define bit_PCONFIG 0x00040000
@@ -195,7 +199,9 @@
#define bit_AMXINT8 0x02000000
/* Features in %eax for leaf 7 sub-leaf 1 */
+#define bit_AVXVNNI 0x00000008
#define bit_AVX512BF16 0x00000020
+#define bit_HRESET 0x00400000
/* Features in %eax for leaf 13 sub-leaf 1 */
#define bit_XSAVEOPT 0x00000001
@@ -309,3 +315,5 @@ static __inline int __get_cpuid_count (unsigned int __leaf,
__cpuid_count(__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx);
return 1;
}
+
+#endif /* __CPUID_H */
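
[Editor's note: an illustrative usage sketch, not part of this patch, showing a run-time test for one of the new leaf 7 sub-leaf 1 feature bits; the function name is hypothetical.]

#include <cpuid.h>

static int has_avxvnni(void)
{
  unsigned int eax, ebx, ecx, edx;
  /* bit_AVXVNNI is reported in %eax of leaf 7, sub-leaf 1. */
  if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (eax & bit_AVXVNNI) != 0;
}
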
diff --git a/clang/lib/Headers/cuda_wrappers/algorithm b/clang/lib/Headers/cuda_wrappers/algorithm
index 01af18360d8d..f14a0b00bb04 100644
--- a/clang/lib/Headers/cuda_wrappers/algorithm
+++ b/clang/lib/Headers/cuda_wrappers/algorithm
@@ -1,4 +1,4 @@
-/*===---- complex - CUDA wrapper for <algorithm> ----------------------------===
+/*===---- algorithm - CUDA wrapper for <algorithm> -------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/clang/lib/Headers/cuda_wrappers/new b/clang/lib/Headers/cuda_wrappers/new
index f49811c5a57c..d5fb3b7011de 100644
--- a/clang/lib/Headers/cuda_wrappers/new
+++ b/clang/lib/Headers/cuda_wrappers/new
@@ -1,4 +1,4 @@
-/*===---- complex - CUDA wrapper for <new> ------------------------------===
+/*===---- new - CUDA wrapper for <new> -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,13 @@
#include_next <new>
+#if !defined(__device__)
+// The header has been included too early from the standard C++ library
+// and CUDA-specific macros are not available yet.
+// Undo the include guard and try again later.
+#undef __CLANG_CUDA_WRAPPERS_NEW
+#else
+
#pragma push_macro("CUDA_NOEXCEPT")
#if __cplusplus >= 201103L
#define CUDA_NOEXCEPT noexcept
@@ -95,4 +102,5 @@ __device__ inline void operator delete[](void *, void *) CUDA_NOEXCEPT {}
#pragma pop_macro("CUDA_NOEXCEPT")
+#endif // __device__
#endif // include guard
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 73a777b107c6..bb759721faeb 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -4025,7 +4025,7 @@ _mm_storeu_si128(__m128i_u *__p, __m128i __b)
///
/// \param __p
/// A pointer to a 64-bit memory location. The address of the memory
-/// location does not have to be algned.
+/// location does not have to be aligned.
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
diff --git a/clang/lib/Headers/gfniintrin.h b/clang/lib/Headers/gfniintrin.h
index 9bff0fcb603e..11a321b7c919 100644
--- a/clang/lib/Headers/gfniintrin.h
+++ b/clang/lib/Headers/gfniintrin.h
@@ -14,38 +14,56 @@
#ifndef __GFNIINTRIN_H
#define __GFNIINTRIN_H
+/* Default attributes for simple form (no masking). */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("gfni"), __min_vector_width__(128)))
+
+/* Default attributes for YMM unmasked form. */
+#define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"), __min_vector_width__(256)))
+
+/* Default attributes for ZMM forms. */
+#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512)))
+
+/* Default attributes for VLX forms. */
+#define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256)))
#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
(__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), \
(char)(I))
-#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
- (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v16qi)(__m128i)(S))
-
-
-#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
- (__m128i)_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
- U, A, B, I)
+#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
+ (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), \
+ (char)(I))
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
+ (__v16qi) __B);
+}
+#ifdef __AVXINTRIN_H
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
(__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), \
(char)(I))
-#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
- (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v32qi)(__m256i)(S))
-
-#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
- (__m256i)_mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
- U, A, B, I)
+#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
+ (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), \
+ (char)(I))
+static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
+_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
+ (__v32qi) __B);
+}
+#endif /* __AVXINTRIN_H */
+#ifdef __AVX512BWINTRIN_H
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
(__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
(__v64qi)(__m512i)(B), \
@@ -60,27 +78,71 @@
(__m512i)_mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
U, A, B, I)
-#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
- (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), \
+#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
+ (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), \
(char)(I))
+#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
+ (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+ (__v64qi)_mm512_gf2p8affine_epi64_epi8(A, B, I), \
+ (__v64qi)(__m512i)(S))
+
+#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
+ (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
+ U, A, B, I)
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
+_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
+ (__v64qi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
+_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_selectb_512(__U,
+ (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
+ (__v64qi) __S);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
+_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
+ __U, __A, __B);
+}
+#endif /* __AVX512BWINTRIN_H */
+
+#ifdef __AVX512VLBWINTRIN_H
+#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
+ (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+ (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
+ (__v16qi)(__m128i)(S))
+
+#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
+ (__m128i)_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
+ U, A, B, I)
+
+#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
+ (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+ (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
+ (__v32qi)(__m256i)(S))
+
+#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
+ (__m256i)_mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
+ U, A, B, I)
+
#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
(__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
(__v16qi)(__m128i)(S))
-
#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
(__m128i)_mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), \
U, A, B, I)
-
-#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
- (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), \
- (char)(I))
-
#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
(__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
@@ -90,41 +152,6 @@
(__m256i)_mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
U, A, B, I)
-
-#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
- (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), \
- (char)(I))
-
-#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
- (__v64qi)_mm512_gf2p8affine_epi64_epi8(A, B, I), \
- (__v64qi)(__m512i)(S))
-
-#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
- (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
- U, A, B, I)
-
-/* Default attributes for simple form (no masking). */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("gfni"), __min_vector_width__(128)))
-
-/* Default attributes for YMM unmasked form. */
-#define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"), __min_vector_width__(256)))
-
-/* Default attributes for ZMM forms. */
-#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512)))
-
-/* Default attributes for VLX forms. */
-#define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256)))
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
-{
- return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
- (__v16qi) __B);
-}
-
static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
@@ -140,13 +167,6 @@ _mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
__U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
-_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
-{
- return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
- (__v32qi) __B);
-}
-
static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
@@ -161,28 +181,7 @@ _mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
__U, __A, __B);
}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
-_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
- (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
-_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_selectb_512(__U,
- (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
- (__v64qi) __S);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
-_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
-{
- return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
- __U, __A, __B);
-}
+#endif /* __AVX512VLBWINTRIN_H */
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_Y
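
[Editor's note: an illustrative usage sketch, not part of this patch. After this reorganization the simple GF2P8 forms need only -mgfni; the helper name is hypothetical.]

#include <immintrin.h>

__m128i gf_mul_bytes(__m128i a, __m128i b)
{
  /* Each byte lane is multiplied in GF(2^8), reduced by the AES
     polynomial x^8 + x^4 + x^3 + x + 1. */
  return _mm_gf2p8mul_epi8(a, b);
}
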
diff --git a/clang/lib/Headers/hresetintrin.h b/clang/lib/Headers/hresetintrin.h
new file mode 100644
index 000000000000..13e31a2e03ad
--- /dev/null
+++ b/clang/lib/Headers/hresetintrin.h
@@ -0,0 +1,49 @@
+/*===---------------- hresetintrin.h - HRESET intrinsics -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __X86GPRINTRIN_H
+#error "Never use <hresetintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef __HRESETINTRIN_H
+#define __HRESETINTRIN_H
+
+#if __has_extension(gnu_asm)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("hreset")))
+
+/// Provides a hint to the processor to selectively reset the prediction
+/// history of the current logical processor, as specified by the 32-bit
+/// integer value \a __eax.
+///
+/// This intrinsic corresponds to the <c> HRESET </c> instruction.
+///
+/// \operation
+/// IF __eax == 0
+/// // nop
+/// ELSE
+/// FOR i := 0 to 31
+/// IF __eax[i]
+/// ResetPredictionFeature(i)
+/// FI
+/// ENDFOR
+/// FI
+/// \endoperation
+static __inline void __DEFAULT_FN_ATTRS
+_hreset(int __eax)
+{
+ __asm__ ("hreset $0" :: "a"(__eax));
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __has_extension(gnu_asm) */
+
+#endif /* __HRESETINTRIN_H */
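
[Editor's note: an illustrative usage sketch, not part of this patch, assuming compilation with -mhreset. Which mask bits are valid is enumerated by CPUID and must be enabled by the OS, so the wrapper below is purely hypothetical.]

#include <x86gprintrin.h>

void reset_history(unsigned int mask)
{
  /* A zero mask makes HRESET a no-op; set bits request that the
     corresponding prediction-history features be reset. */
  _hreset((int)mask);
}
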
diff --git a/clang/lib/Headers/ia32intrin.h b/clang/lib/Headers/ia32intrin.h
index 79b7f0655cf0..00138effd505 100644
--- a/clang/lib/Headers/ia32intrin.h
+++ b/clang/lib/Headers/ia32intrin.h
@@ -14,6 +14,18 @@
#ifndef __IA32INTRIN_H
#define __IA32INTRIN_H
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS_SSE42 __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__))
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
/** Find the first set bit starting from the lsb. Result is undefined if
* input is 0.
*
@@ -26,7 +38,7 @@
* A 32-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfd(int __A) {
return __builtin_ctz(__A);
}
@@ -43,7 +55,7 @@ __bsfd(int __A) {
* A 32-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrd(int __A) {
return 31 - __builtin_clz(__A);
}
@@ -59,12 +71,12 @@ __bsrd(int __A) {
* A 32-bit integer operand.
* \returns A 32-bit integer containing the swapped bytes.
*/
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapd(int __A) {
return __builtin_bswap32(__A);
}
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
_bswap(int __A) {
return __builtin_bswap32(__A);
}
@@ -85,7 +97,7 @@ _bswap(int __A) {
* A 64-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfq(long long __A) {
return __builtin_ctzll(__A);
}
@@ -102,7 +114,7 @@ __bsfq(long long __A) {
* A 64-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrq(long long __A) {
return 63 - __builtin_clzll(__A);
}
@@ -118,7 +130,7 @@ __bsrq(long long __A) {
* A 64-bit integer operand.
* \returns A 64-bit integer containing the swapped bytes.
*/
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapq(long long __A) {
return __builtin_bswap64(__A);
}
@@ -138,7 +150,7 @@ __bswapq(long long __A) {
* \returns A 32-bit integer containing the number of bits with value 1 in the
* source operand.
*/
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__popcntd(unsigned int __A)
{
return __builtin_popcount(__A);
@@ -159,7 +171,7 @@ __popcntd(unsigned int __A)
* \returns A 64-bit integer containing the number of bits with value 1 in the
* source operand.
*/
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
__popcntq(unsigned long long __A)
{
return __builtin_popcountll(__A);
@@ -169,26 +181,26 @@ __popcntq(unsigned long long __A)
#endif /* __x86_64__ */
#ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__readeflags(void)
{
return __builtin_ia32_readeflags_u64();
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
__writeeflags(unsigned long long __f)
{
__builtin_ia32_writeeflags_u64(__f);
}
#else /* !__x86_64__ */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
__readeflags(void)
{
return __builtin_ia32_readeflags_u32();
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
__writeeflags(unsigned int __f)
{
__builtin_ia32_writeeflags_u32(__f);
@@ -205,11 +217,9 @@ __writeeflags(unsigned int __f)
* A 32-bit float value.
* \returns a 32-bit unsigned integer containing the converted value.
*/
-static __inline__ unsigned int __attribute__((__always_inline__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST
_castf32_u32(float __A) {
- unsigned int D;
- __builtin_memcpy(&D, &__A, sizeof(__A));
- return D;
+ return __builtin_bit_cast(unsigned int, __A);
}
/** Cast a 64-bit float value to a 64-bit unsigned integer value
@@ -222,11 +232,9 @@ _castf32_u32(float __A) {
* A 64-bit float value.
* \returns a 64-bit unsigned integer containing the converted value.
*/
-static __inline__ unsigned long long __attribute__((__always_inline__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST
_castf64_u64(double __A) {
- unsigned long long D;
- __builtin_memcpy(&D, &__A, sizeof(__A));
- return D;
+ return __builtin_bit_cast(unsigned long long, __A);
}
/** Cast a 32-bit unsigned integer value to a 32-bit float value
@@ -239,11 +247,9 @@ _castf64_u64(double __A) {
* A 32-bit unsigned integer value.
* \returns a 32-bit float value containing the converted value.
*/
-static __inline__ float __attribute__((__always_inline__))
+static __inline__ float __DEFAULT_FN_ATTRS_CAST
_castu32_f32(unsigned int __A) {
- float D;
- __builtin_memcpy(&D, &__A, sizeof(__A));
- return D;
+ return __builtin_bit_cast(float, __A);
}
/** Cast a 64-bit unsigned integer value to a 64-bit float value
@@ -256,11 +262,9 @@ _castu32_f32(unsigned int __A) {
* A 64-bit unsigned integer value.
* \returns a 64-bit float value containing the converted value.
*/
-static __inline__ double __attribute__((__always_inline__))
+static __inline__ double __DEFAULT_FN_ATTRS_CAST
_castu64_f64(unsigned long long __A) {
- double D;
- __builtin_memcpy(&D, &__A, sizeof(__A));
- return D;
+ return __builtin_bit_cast(double, __A);
}
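
[Editor's note: an illustrative usage sketch, not part of this patch. The cast intrinsics reinterpret bits without numeric conversion, so a round trip is lossless; the helper name is hypothetical.]

#include <x86intrin.h>

unsigned int float_bits(float f)
{
  unsigned int u = _castf32_u32(f);  /* same 32 bits, viewed as an integer */
  float back = _castu32_f32(u);      /* and back again, bit-identical */
  (void)back;
  return u;
}
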
/** Adds the unsigned integer operand to the CRC-32C checksum of the
@@ -278,7 +282,7 @@ _castu64_f64(unsigned long long __A) {
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
__crc32b(unsigned int __C, unsigned char __D)
{
return __builtin_ia32_crc32qi(__C, __D);
@@ -299,7 +303,7 @@ __crc32b(unsigned int __C, unsigned char __D)
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
__crc32w(unsigned int __C, unsigned short __D)
{
return __builtin_ia32_crc32hi(__C, __D);
@@ -320,7 +324,7 @@ __crc32w(unsigned int __C, unsigned short __D)
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
__crc32d(unsigned int __C, unsigned int __D)
{
return __builtin_ia32_crc32si(__C, __D);
@@ -342,20 +346,20 @@ __crc32d(unsigned int __C, unsigned int __D)
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_SSE42
__crc32q(unsigned long long __C, unsigned long long __D)
{
return __builtin_ia32_crc32di(__C, __D);
}
#endif /* __x86_64__ */
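
[Editor's note: an illustrative usage sketch, not part of this patch, folding a byte buffer into a CRC-32C checksum, assuming compilation with -msse4.2; the helper name is hypothetical.]

#include <x86intrin.h>
#include <stddef.h>

unsigned int crc32c_bytes(unsigned int crc, const unsigned char *p, size_t n)
{
  /* Byte-at-a-time for clarity; wider __crc32w/__crc32d/__crc32q steps
     would process the bulk of the buffer faster. */
  for (size_t i = 0; i < n; ++i)
    crc = __crc32b(crc, p[i]);
  return crc;
}
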
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__rdpmc(int __A) {
return __builtin_ia32_rdpmc(__A);
}
/* __rdtscp */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__rdtscp(unsigned int *__A) {
return __builtin_ia32_rdtscp(__A);
}
@@ -364,48 +368,48 @@ __rdtscp(unsigned int *__A) {
#define _rdpmc(A) __rdpmc(A)
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
_wbinvd(void) {
__builtin_ia32_wbinvd();
}
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
__rolb(unsigned char __X, int __C) {
return __builtin_rotateleft8(__X, __C);
}
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
__rorb(unsigned char __X, int __C) {
return __builtin_rotateright8(__X, __C);
}
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
__rolw(unsigned short __X, int __C) {
return __builtin_rotateleft16(__X, __C);
}
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
__rorw(unsigned short __X, int __C) {
return __builtin_rotateright16(__X, __C);
}
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
__rold(unsigned int __X, int __C) {
return __builtin_rotateleft32(__X, __C);
}
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
__rord(unsigned int __X, int __C) {
return __builtin_rotateright32(__X, __C);
}
#ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
__rolq(unsigned long long __X, int __C) {
return __builtin_rotateleft64(__X, __C);
}
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
__rorq(unsigned long long __X, int __C) {
return __builtin_rotateright64(__X, __C);
}
@@ -429,4 +433,9 @@ __rorq(unsigned long long __X, int __C) {
#define _rotwl(a,b) __rolw((a), (b))
#define _rotwr(a,b) __rorw((a), (b))
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CAST
+#undef __DEFAULT_FN_ATTRS_SSE42
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
+
#endif /* __IA32INTRIN_H */
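
[Editor's note: an illustrative usage sketch, not part of this patch. With the new _CONSTEXPR attribute variants, these helpers can fold at compile time when built as C++11 or later; at run time they behave identically in C. The function name is hypothetical.]

#include <x86intrin.h>

int check_bits(void)
{
  unsigned int rolled  = __rold(0x80000001u, 1); /* rotate left: 0x00000003 */
  int          swapped = __bswapd(0x11223344);   /* byte swap:  0x44332211 */
  return rolled == 0x3u && swapped == 0x44332211;
}
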
diff --git a/clang/lib/Headers/immintrin.h b/clang/lib/Headers/immintrin.h
index e9dff2310fdf..22f7a520c929 100644
--- a/clang/lib/Headers/immintrin.h
+++ b/clang/lib/Headers/immintrin.h
@@ -10,6 +10,8 @@
#ifndef __IMMINTRIN_H
#define __IMMINTRIN_H
+#include <x86gprintrin.h>
+
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__MMX__)
#include <mmintrin.h>
@@ -144,6 +146,11 @@
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVXVNNI__)
+#include <avxvnniintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVX512DQ__)
#include <avx512dqintrin.h>
#endif
@@ -472,6 +479,11 @@ _storebe_i64(void * __P, long long __D) {
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__KL__) || defined(__WIDEKL__)
+#include <keylockerintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
#include <amxintrin.h>
#endif
diff --git a/clang/lib/Headers/intrin.h b/clang/lib/Headers/intrin.h
index 871b47ca8267..a78b96997d18 100644
--- a/clang/lib/Headers/intrin.h
+++ b/clang/lib/Headers/intrin.h
@@ -57,16 +57,11 @@ void __addfsbyte(unsigned long, unsigned char);
void __addfsdword(unsigned long, unsigned long);
void __addfsword(unsigned long, unsigned short);
void __code_seg(const char *);
-static __inline__
void __cpuid(int[4], int);
-static __inline__
void __cpuidex(int[4], int, int);
-static __inline__
__int64 __emul(int, int);
-static __inline__
unsigned __int64 __emulu(unsigned int, unsigned int);
unsigned int __getcallerseflags(void);
-static __inline__
void __halt(void);
unsigned char __inbyte(unsigned short);
void __inbytestring(unsigned short, unsigned char *, unsigned long);
@@ -82,13 +77,9 @@ void __inwordstring(unsigned short, unsigned short *, unsigned long);
void __lidt(void *);
unsigned __int64 __ll_lshift(unsigned __int64, int);
__int64 __ll_rshift(__int64, int);
-static __inline__
void __movsb(unsigned char *, unsigned char const *, size_t);
-static __inline__
void __movsd(unsigned long *, unsigned long const *, size_t);
-static __inline__
void __movsw(unsigned short *, unsigned short const *, size_t);
-static __inline__
void __nop(void);
void __nvreg_restore_fence(void);
void __nvreg_save_fence(void);
@@ -105,23 +96,16 @@ unsigned long __readcr4(void);
unsigned long __readcr8(void);
unsigned int __readdr(unsigned int);
#ifdef __i386__
-static __inline__
unsigned char __readfsbyte(unsigned long);
-static __inline__
unsigned __int64 __readfsqword(unsigned long);
-static __inline__
unsigned short __readfsword(unsigned long);
#endif
-static __inline__
unsigned __int64 __readmsr(unsigned long);
unsigned __int64 __readpmc(unsigned long);
unsigned long __segmentlimit(unsigned long);
void __sidt(void *);
-static __inline__
void __stosb(unsigned char *, unsigned char, size_t);
-static __inline__
void __stosd(unsigned long *, unsigned long, size_t);
-static __inline__
void __stosw(unsigned short *, unsigned short, size_t);
void __svm_clgi(void);
void __svm_invlpga(void *, int);
@@ -136,7 +120,6 @@ void __vmx_off(void);
void __vmx_vmptrst(unsigned __int64 *);
void __wbinvd(void);
void __writecr0(unsigned int);
-static __inline__
void __writecr3(unsigned __INTPTR_TYPE__);
void __writecr4(unsigned int);
void __writecr8(unsigned int);
@@ -146,11 +129,8 @@ void __writefsdword(unsigned long, unsigned long);
void __writefsqword(unsigned long, unsigned __int64);
void __writefsword(unsigned long, unsigned short);
void __writemsr(unsigned long, unsigned __int64);
-static __inline__
void *_AddressOfReturnAddress(void);
-static __inline__
unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
-static __inline__
unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
unsigned char _bittest(long const *, long);
unsigned char _bittestandcomplement(long *, long);
@@ -169,12 +149,10 @@ long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadBarrier(void);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadWriteBarrier(void);
+void __attribute__((__deprecated__(
+ "use other intrinsics or C++11 atomics instead"))) _ReadBarrier(void);
+void __attribute__((__deprecated__(
+ "use other intrinsics or C++11 atomics instead"))) _ReadWriteBarrier(void);
unsigned int _rorx_u32(unsigned int, const unsigned int);
int _sarx_i32(int, unsigned int);
#if __STDC_HOSTED__
@@ -185,9 +163,8 @@ unsigned int _shrx_u32(unsigned int, unsigned int);
void _Store_HLERelease(long volatile *, long);
void _Store64_HLERelease(__int64 volatile *, __int64);
void _StorePointer_HLERelease(void *volatile *, void *);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_WriteBarrier(void);
+void __attribute__((__deprecated__(
+ "use other intrinsics or C++11 atomics instead"))) _WriteBarrier(void);
unsigned __int32 xbegin(void);
void _xend(void);
@@ -197,19 +174,14 @@ void __addgsbyte(unsigned long, unsigned char);
void __addgsdword(unsigned long, unsigned long);
void __addgsqword(unsigned long, unsigned __int64);
void __addgsword(unsigned long, unsigned short);
-static __inline__
void __faststorefence(void);
void __incgsbyte(unsigned long);
void __incgsdword(unsigned long);
void __incgsqword(unsigned long);
void __incgsword(unsigned long);
-static __inline__
void __movsq(unsigned long long *, unsigned long long const *, size_t);
-static __inline__
unsigned char __readgsbyte(unsigned long);
-static __inline__
unsigned long __readgsdword(unsigned long);
-static __inline__
unsigned __int64 __readgsqword(unsigned long);
unsigned short __readgsword(unsigned long);
unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
@@ -218,7 +190,6 @@ unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
unsigned __int64 _HighPart,
unsigned char _Shift);
-static __inline__
void __stosq(unsigned __int64 *, unsigned __int64, size_t);
unsigned char __vmx_on(unsigned __int64 *);
unsigned char __vmx_vmclear(unsigned __int64 *);
@@ -243,10 +214,6 @@ unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
long _Comparand);
-unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
- __int64 _ExchangeHigh,
- __int64 _ExchangeLow,
- __int64 *_CompareandResult);
unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
__int64 _ExchangeHigh,
__int64 _ExchangeLow,
@@ -269,13 +236,9 @@ unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
__int64 _sarx_i64(__int64, unsigned int);
unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
-static __inline__
__int64 __mulh(__int64, __int64);
-static __inline__
unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
-static __inline__
__int64 _mul128(__int64, __int64, __int64*);
-static __inline__
unsigned __int64 _umul128(unsigned __int64,
unsigned __int64,
unsigned __int64*);
@@ -284,29 +247,19 @@ unsigned __int64 _umul128(unsigned __int64,
#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__
unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
-static __inline__
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
#endif
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__
__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-static __inline__
__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-static __inline__
__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
-static __inline__
__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
-static __inline__
__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
-static __inline__
__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
-static __inline__
__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
-static __inline__
__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
#endif
@@ -470,45 +423,81 @@ __int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand);
#endif
+#if defined(__x86_64__) || defined(__aarch64__)
+unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+#endif
+#if defined(__aarch64__)
+unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+#endif
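
[Editor's note: an illustrative usage sketch, not part of this patch, for an MSVC-compatible compilation on x86_64 or AArch64. _InterlockedCompareExchange128 returns nonzero on success and rewrites the comparand array with the current value on failure; the destination must be 16-byte aligned, the array layout is { low, high }, and the helper name is hypothetical.]

#include <intrin.h>

void store128(__int64 volatile *dst, __int64 hi, __int64 lo)
{
  __int64 expected[2] = { dst[0], dst[1] }; /* { low, high } snapshot */
  while (!_InterlockedCompareExchange128(dst, hi, lo, expected)) {
    /* expected now holds the current value; retry until the CAS sticks */
  }
}
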
/*----------------------------------------------------------------------------*\
|* movs, stos
\*----------------------------------------------------------------------------*/
#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
+static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst,
+ unsigned char const *__src,
+ size_t __n) {
__asm__ __volatile__("rep movsb" : "+D"(__dst), "+S"(__src), "+c"(__n)
: : "memory");
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
- __asm__ __volatile__("rep movsl" : "+D"(__dst), "+S"(__src), "+c"(__n)
- : : "memory");
+static __inline__ void __DEFAULT_FN_ATTRS __movsd(unsigned long *__dst,
+ unsigned long const *__src,
+ size_t __n) {
+ __asm__ __volatile__("rep movsl"
+ : "+D"(__dst), "+S"(__src), "+c"(__n)
+ :
+ : "memory");
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
- __asm__ __volatile__("rep movsw" : "+D"(__dst), "+S"(__src), "+c"(__n)
- : : "memory");
+static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst,
+ unsigned short const *__src,
+ size_t __n) {
+ __asm__ __volatile__("rep movsw"
+ : "+D"(__dst), "+S"(__src), "+c"(__n)
+ :
+ : "memory");
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
- __asm__ __volatile__("rep stosl" : "+D"(__dst), "+c"(__n) : "a"(__x)
+static __inline__ void __DEFAULT_FN_ATTRS __stosd(unsigned long *__dst,
+ unsigned long __x,
+ size_t __n) {
+ __asm__ __volatile__("rep stosl"
+ : "+D"(__dst), "+c"(__n)
+ : "a"(__x)
: "memory");
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
- __asm__ __volatile__("rep stosw" : "+D"(__dst), "+c"(__n) : "a"(__x)
+static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst,
+ unsigned short __x,
+ size_t __n) {
+ __asm__ __volatile__("rep stosw"
+ : "+D"(__dst), "+c"(__n)
+ : "a"(__x)
: "memory");
}
#endif
#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
- __asm__ __volatile__("rep movsq" : "+D"(__dst), "+S"(__src), "+c"(__n)
- : : "memory");
+static __inline__ void __DEFAULT_FN_ATTRS __movsq(
+ unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
+ __asm__ __volatile__("rep movsq"
+ : "+D"(__dst), "+S"(__src), "+c"(__n)
+ :
+ : "memory");
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
+static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst,
+ unsigned __int64 __x,
+ size_t __n) {
__asm__ __volatile__("rep stosq" : "+D"(__dst), "+c"(__n) : "a"(__x)
: "memory");
}
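
[Editor's note: an illustrative usage sketch, not part of this patch. The rep-string helpers behave like memset/memcpy at element granularity; the helper name is hypothetical.]

#include <intrin.h>
#include <stddef.h>

void fill_and_copy(unsigned long *dst, unsigned long const *src, size_t n)
{
  __stosd(dst, 0xDEADBEEFul, n); /* rep stosl: store n dwords of the pattern */
  __movsd(dst, src, n);          /* rep movsl: then copy n dwords from src */
}
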
@@ -518,26 +507,25 @@ __stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
|* Misc
\*----------------------------------------------------------------------------*/
#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__cpuid(int __info[4], int __level) {
- __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
- : "a"(__level), "c"(0));
+static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) {
+ __asm__("cpuid"
+ : "=a"(__info[0]), "=b"(__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level), "c"(0));
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__cpuidex(int __info[4], int __level, int __ecx) {
- __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
- : "a"(__level), "c"(__ecx));
+static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level,
+ int __ecx) {
+ __asm__("cpuid"
+ : "=a"(__info[0]), "=b"(__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level), "c"(__ecx));
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__halt(void) {
- __asm__ volatile ("hlt");
+static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
+ __asm__ volatile("hlt");
}
#endif
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__nop(void) {
- __asm__ volatile ("nop");
+static __inline__ void __DEFAULT_FN_ATTRS __nop(void) {
+ __asm__ volatile("nop");
}
#endif
@@ -574,8 +562,7 @@ __readmsr(unsigned long __register) {
}
#endif
-static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS
-__readcr3(void) {
+static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) {
unsigned __LPTRINT_TYPE__ __cr3_val;
__asm__ __volatile__ ("mov %%cr3, %0" : "=r"(__cr3_val) : : "memory");
return __cr3_val;
diff --git a/clang/lib/Headers/keylockerintrin.h b/clang/lib/Headers/keylockerintrin.h
new file mode 100644
index 000000000000..c15d39c8e392
--- /dev/null
+++ b/clang/lib/Headers/keylockerintrin.h
@@ -0,0 +1,506 @@
+/*===----------------- keylockerintrin.h - KL Intrinsics -------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <keylockerintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _KEYLOCKERINTRIN_H
+#define _KEYLOCKERINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__KL__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("kl"),\
+ __min_vector_width__(128)))
+
+/// Load the internal wrapping key from __intkey, __enkey_lo and __enkey_hi.
+/// __ctl will be assigned to EAX, which specifies the KeySource and whether
+/// backing up the key is permitted. The 256-bit encryption key is loaded from
+/// the two explicit operands (__enkey_lo and __enkey_hi). The 128-bit
+/// integrity key is loaded from the implicit operand XMM0, which is assigned
+/// by __intkey.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LOADIWKEY </c> instruction.
+///
+/// \operation
+/// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
+/// GP (0)
+/// FI
+/// IF “LOADIWKEY exiting” VM execution control set
+/// VMexit
+/// FI
+/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used
+/// GP (0)
+/// FI
+/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set
+/// GP (0)
+/// FI
+/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part
+/// GP (0)
+/// FI
+/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part
+/// GP (0)
+/// FI
+/// IF (__ctl[4:1] == 0) // KeySource of 0.
+/// IWKey.Encryption Key[127:0] := __enkey_hi[127:0]
+/// IWKey.Encryption Key[255:128] := __enkey_lo[127:0]
+/// IWKey.IntegrityKey[127:0] := __intkey[127:0]
+/// IWKey.NoBackup := __ctl[0]
+/// IWKey.KeySource := __ctl[4:1]
+/// ZF := 0
+/// ELSE // KeySource of 1. See RDSEED definition for details of randomness
+/// IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received
+/// IWKey.Encryption Key[127:0] := __enkey_hi[127:0] XOR HW_NRND_GEN.data[127:0]
+/// IWKey.Encryption Key[255:128] := __enkey_lo[127:0] XOR HW_NRND_GEN.data[255:128]
+/// IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256]
+/// IWKey.NoBackup := __ctl[0]
+/// IWKey.KeySource := __ctl[4:1]
+/// ZF := 0
+/// ELSE // Random data was not returned from RDSEED. IWKey was not loaded
+/// ZF := 1
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
+ __m128i __enkey_lo, __m128i __enkey_hi) {
+ __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl);
+}
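As a usage illustration (not part of the header): a minimal sketch of ring-0 code loading the internal wrapping key, assuming KL support is enabled and the caller supplies the key material; the helper name os_load_iwkey is hypothetical.

#include <immintrin.h>

/* Hypothetical ring-0 helper; executing LOADIWKEY outside ring 0 raises #GP. */
static void os_load_iwkey(__m128i integrity_key, __m128i enkey_lo,
                          __m128i enkey_hi) {
  /* __ctl = 0: KeySource 0 (use the operands directly), backup permitted. */
  _mm_loadiwkey(0, integrity_key, enkey_lo, enkey_hi);
}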
+
+/// Wrap a 128-bit AES key from __key into a key handle, store the handle in
+/// ((__m128i*)__h) to ((__m128i*)__h) + 5, and return a 32-bit status value.
+/// The explicit source operand __htype specifies handle restrictions.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instruction.
+///
+/// \operation
+/// InputKey[127:0] := __key[127:0]
+/// KeyMetadata[2:0] := __htype[2:0]
+/// KeyMetadata[23:3] := 0 // Reserved for future usage
+/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0)
+/// KeyMetadata[127:28] := 0 // Reserved for future usage
+/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0],
+/// IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
+/// dst[0] := IWKey.NoBackup
+/// dst[4:1] := IWKey.KeySource[3:0]
+/// dst[31:5] := 0
+/// MEM[__h+127:__h] := Handle[127:0] // AAD
+/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag
+/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText
+/// MEM[__h+511:__h+384] := 0 // Reserved for future usage
+/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
+/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
+/// OF := 0
+/// SF := 0
+/// ZF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
+ return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
+}
+
+/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, store the
+/// handle in ((__m128i*)__h) to ((__m128i*)__h) + 6, and return a 32-bit
+/// status value.
+/// The explicit source operand __htype specifies handle restrictions.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instruction.
+///
+/// \operation
+/// InputKey[127:0] := __key_lo[127:0]
+/// InputKey[255:128] := __key_hi[127:0]
+/// KeyMetadata[2:0] := __htype[2:0]
+/// KeyMetadata[23:3] := 0 // Reserved for future usage
+/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1)
+/// KeyMetadata[127:28] := 0 // Reserved for future usage
+/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0],
+/// IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
+/// dst[0] := IWKey.NoBackup
+/// dst[4:1] := IWKey.KeySource[3:0]
+/// dst[31:5] := 0
+/// MEM[__h+127:__h] := Handle[127:0] // AAD
+/// MEM[__h+255:__h+128] := Handle[255:128] // Tag
+/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]
+/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]
+/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
+/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
+/// MEM[__h+895:__h+768] := 0 // Reserved for future usage
+/// OF := 0
+/// SF := 0
+/// ZF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
+ void *__h) {
+ return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo,
+ (__v2di)__key_hi, __h);
+}
+
+/// The AESENC128KL instruction performs 10 rounds of AES to encrypt __idata
+/// using the 128-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENC128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[383:256] ||
+/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+/// IF (Authentic == 0)
+/// ZF := 1
+/// ELSE
+/// MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey)
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+ return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESENC256KL instruction performs 14 rounds of AES to encrypt __idata
+/// using the 256-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENC256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[255:128] ||
+/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+/// IF (Authentic == 0)
+/// ZF := 1
+/// ELSE
+/// MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+ return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC128KL instruction performs 10 rounds of AES to decrypt __idata
+/// using the 128-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDEC128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[383:256] ||
+/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+/// IF (Authentic == 0)
+/// ZF := 1
+/// ELSE
+/// MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+ return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC256KL instruction performs 14 rounds of AES to decrypt __idata
+/// using the 256-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDEC256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[383:256] ||
+/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256)
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+/// IF (Authentic == 0)
+/// ZF := 1
+/// ELSE
+/// MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey)
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+ return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
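Taken together, the intrinsics above support the following round-trip. This is a hedged sketch, assuming an OS-loaded IWKey and a CPU with the kl feature (compile with -mkl); the handle buffer size follows the pseudo-code above, and the helper name kl_roundtrip is illustrative.

#include <immintrin.h>
#include <string.h>

static int kl_roundtrip(__m128i aes_key, __m128i plaintext) {
  __m128i handle[6]; /* ENCODEKEY128 writes ((__m128i*)h) .. ((__m128i*)h)+5 */
  unsigned int info = _mm_encodekey128_u32(0 /* no restrictions */, aes_key,
                                           handle);
  (void)info; /* carries the IWKey NoBackup/KeySource bits */

  __m128i ct, pt;
  if (_mm_aesenc128kl_u8(&ct, plaintext, handle)) /* nonzero => bad handle */
    return -1;
  if (_mm_aesdec128kl_u8(&pt, ct, handle))
    return -1;
  return memcmp(&pt, &plaintext, sizeof(pt)) == 0 ? 0 : -1;
}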
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+ || defined(__KL__) */
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__WIDEKL__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\
+ __min_vector_width__(128)))
+
+/// Encrypt __idata[0] through __idata[7] using the 128-bit AES key indicated
+/// by the handle at __h, store each resultant block in __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[255:128] ||
+/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+/// IF Authentic == 0
+/// ZF := 1
+/// ELSE
+/// FOR i := 0 to 7
+/// __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey)
+/// ENDFOR
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+ return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
+ (const __v2di *)__idata, __h);
+}
+
+/// Encrypt __idata[0] through __idata[7] using the 256-bit AES key indicated
+/// by the handle at __h, store each resultant block in __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[255:128] ||
+/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+/// IF Authentic == 0
+/// ZF := 1
+/// ELSE
+/// FOR i := 0 to 7
+/// __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)
+/// ENDFOR
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+ return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
+ (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] through __idata[7] using the 128-bit AES key indicated
+/// by the handle at __h, store each resultant block in __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[255:128] ||
+/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+/// IF Authentic == 0
+/// ZF := 1
+/// ELSE
+/// FOR i := 0 to 7
+/// __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)
+/// ENDFOR
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+ return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
+ (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] through __idata[7] using the 256-bit AES key indicated
+/// by the handle at __h, store each resultant block in __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+/// (Handle[127:0] AND (CPL > 0)) ||
+/// Handle[255:128] ||
+/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+/// ZF := 1
+/// ELSE
+/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+/// IF Authentic == 0
+/// ZF := 1
+/// ELSE
+/// FOR i := 0 to 7
+/// __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)
+/// ENDFOR
+/// ZF := 0
+/// FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+ return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
+ (const __v2di *)__idata, __h);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+ || defined(__WIDEKL__) */
+
+#endif /* _KEYLOCKERINTRIN_H */
diff --git a/clang/lib/Headers/mm_malloc.h b/clang/lib/Headers/mm_malloc.h
index 0ea32517aea8..933dbaacade5 100644
--- a/clang/lib/Headers/mm_malloc.h
+++ b/clang/lib/Headers/mm_malloc.h
@@ -54,7 +54,13 @@ _mm_malloc(size_t __size, size_t __align)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_free(void *__p)
{
+#if defined(__MINGW32__)
+ __mingw_aligned_free(__p);
+#elif defined(_WIN32)
+ _aligned_free(__p);
+#else
free(__p);
+#endif
}
#endif
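With this change, _mm_free releases memory through the same platform allocator family that _mm_malloc draws from (the MinGW and MSVC paths use their respective aligned allocators), so a matched pair behaves correctly on all three paths. A sketch of the intended pairing:

#include <mm_malloc.h>

void demo(void) {
  /* 64-byte-aligned allocation; release with _mm_free, never plain free(). */
  float *buf = (float *)_mm_malloc(256 * sizeof(float), 64);
  if (buf) {
    buf[0] = 1.0f;
    _mm_free(buf);
  }
}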
diff --git a/clang/lib/Headers/opencl-c-base.h b/clang/lib/Headers/opencl-c-base.h
index 430e07d36f62..e8dcd70377e5 100644
--- a/clang/lib/Headers/opencl-c-base.h
+++ b/clang/lib/Headers/opencl-c-base.h
@@ -9,6 +9,21 @@
#ifndef _OPENCL_BASE_H_
#define _OPENCL_BASE_H_
+// Define extension macros
+
+#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
+// For SPIR all extensions are supported.
+#if defined(__SPIR__)
+#define cl_khr_subgroup_extended_types 1
+#define cl_khr_subgroup_non_uniform_vote 1
+#define cl_khr_subgroup_ballot 1
+#define cl_khr_subgroup_non_uniform_arithmetic 1
+#define cl_khr_subgroup_shuffle 1
+#define cl_khr_subgroup_shuffle_relative 1
+#define cl_khr_subgroup_clustered_reduce 1
+#endif // defined(__SPIR__)
+#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
+
// built-in scalar data types:
/**
@@ -568,4 +583,7 @@ typedef struct {
#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
#endif // cl_intel_device_side_avc_motion_estimation
+// Disable any extensions we may have enabled previously.
+#pragma OPENCL EXTENSION all : disable
+
#endif //_OPENCL_BASE_H_
diff --git a/clang/lib/Headers/opencl-c.h b/clang/lib/Headers/opencl-c.h
index 66e18bdd47bb..ab665628c8e1 100644
--- a/clang/lib/Headers/opencl-c.h
+++ b/clang/lib/Headers/opencl-c.h
@@ -4633,6 +4633,7 @@ float16 __ovld __cnfn convert_float16(float16);
// Conversions with double data type parameters or return value.
#ifdef cl_khr_fp64
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
char __ovld __cnfn convert_char(double);
char __ovld __cnfn convert_char_rte(double);
char __ovld __cnfn convert_char_rtn(double);
@@ -5455,6 +5456,7 @@ double16 __ovld __cnfn convert_double16_rtz(ushort16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
// Convert half types to non-double types.
uchar __ovld __cnfn convert_uchar(half);
uchar __ovld __cnfn convert_uchar_rte(half);
diff --git a/clang/lib/Headers/openmp_wrappers/cmath b/clang/lib/Headers/openmp_wrappers/cmath
index bd6011eb6f6d..1aff66af7d52 100644
--- a/clang/lib/Headers/openmp_wrappers/cmath
+++ b/clang/lib/Headers/openmp_wrappers/cmath
@@ -24,8 +24,11 @@
// which might live in cstdlib.
#include <cstdlib>
+// We need limits because __clang_cuda_cmath.h below uses `std::numeric_limits`.
+#include <limits>
+
#pragma omp begin declare variant match( \
- device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any, allow_templates)})
#define __CUDA__
#define __OPENMP_NVPTX__
diff --git a/clang/lib/Headers/openmp_wrappers/complex b/clang/lib/Headers/openmp_wrappers/complex
index 1ed0b14879ef..142e526b81b3 100644
--- a/clang/lib/Headers/openmp_wrappers/complex
+++ b/clang/lib/Headers/openmp_wrappers/complex
@@ -18,8 +18,35 @@
#include <cmath>
#define __CUDA__
+#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_NVPTX__
#endif
// Grab the host header too.
#include_next <complex>
+
+
+#ifdef __cplusplus
+
+// If we are compiling against libc++, the macro _LIBCPP_STD_VER should be set
+// after including <cmath> above. Since the complex header we use is a
+// simplified version of the libc++ one, we don't need it in this case. If we
+// compile against libstdc++, or any other standard library, we overload the
+// (hopefully templated) functions in the <complex> header with the ones we
+// got from libc++, which decompose math functions, like `std::sin`, into
+// arithmetic and calls to non-complex functions, all of which we can then
+// handle.
+#ifndef _LIBCPP_STD_VER
+
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, \
+ implementation = {extension(match_any, allow_templates)})
+
+#include <complex_cmath.h>
+
+#pragma omp end declare variant
+
+#endif
+
+#endif
diff --git a/clang/lib/Headers/openmp_wrappers/complex.h b/clang/lib/Headers/openmp_wrappers/complex.h
index 829c7a785725..00d278548f82 100644
--- a/clang/lib/Headers/openmp_wrappers/complex.h
+++ b/clang/lib/Headers/openmp_wrappers/complex.h
@@ -18,7 +18,9 @@
#include <math.h>
#define __CUDA__
+#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_NVPTX__
#endif
// Grab the host header too.
diff --git a/clang/lib/Headers/openmp_wrappers/complex_cmath.h b/clang/lib/Headers/openmp_wrappers/complex_cmath.h
new file mode 100644
index 000000000000..e3d9aebbbc24
--- /dev/null
+++ b/clang/lib/Headers/openmp_wrappers/complex_cmath.h
@@ -0,0 +1,388 @@
+//===-------------------------- complex_cmath.h ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// std::complex header copied from the libcxx source and simplified for use in
+// OpenMP target offload regions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#ifndef __cplusplus
+#error "This file is for C++ compilation only."
+#endif
+
+#ifndef _LIBCPP_COMPLEX
+#define _LIBCPP_COMPLEX
+
+#include <cmath>
+#include <type_traits>
+
+#define __DEVICE__ static constexpr __attribute__((nothrow))
+
+namespace std {
+
+// abs
+
+template <class _Tp> __DEVICE__ _Tp abs(const std::complex<_Tp> &__c) {
+ return hypot(__c.real(), __c.imag());
+}
+
+// arg
+
+template <class _Tp> __DEVICE__ _Tp arg(const std::complex<_Tp> &__c) {
+ return atan2(__c.imag(), __c.real());
+}
+
+template <class _Tp>
+typename enable_if<is_integral<_Tp>::value || is_same<_Tp, double>::value,
+ double>::type
+arg(_Tp __re) {
+ return atan2(0., __re);
+}
+
+template <class _Tp>
+typename enable_if<is_same<_Tp, float>::value, float>::type arg(_Tp __re) {
+ return atan2f(0.F, __re);
+}
+
+// norm
+
+template <class _Tp> __DEVICE__ _Tp norm(const std::complex<_Tp> &__c) {
+ if (std::isinf(__c.real()))
+ return abs(__c.real());
+ if (std::isinf(__c.imag()))
+ return abs(__c.imag());
+ return __c.real() * __c.real() + __c.imag() * __c.imag();
+}
+
+// conj
+
+template <class _Tp> std::complex<_Tp> conj(const std::complex<_Tp> &__c) {
+ return std::complex<_Tp>(__c.real(), -__c.imag());
+}
+
+// proj
+
+template <class _Tp> std::complex<_Tp> proj(const std::complex<_Tp> &__c) {
+ std::complex<_Tp> __r = __c;
+ if (std::isinf(__c.real()) || std::isinf(__c.imag()))
+ __r = std::complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag()));
+ return __r;
+}
+
+// polar
+
+template <class _Tp>
+complex<_Tp> polar(const _Tp &__rho, const _Tp &__theta = _Tp()) {
+ if (std::isnan(__rho) || signbit(__rho))
+ return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+ if (std::isnan(__theta)) {
+ if (std::isinf(__rho))
+ return std::complex<_Tp>(__rho, __theta);
+ return std::complex<_Tp>(__theta, __theta);
+ }
+ if (std::isinf(__theta)) {
+ if (std::isinf(__rho))
+ return std::complex<_Tp>(__rho, _Tp(NAN));
+ return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+ }
+ _Tp __x = __rho * cos(__theta);
+ if (std::isnan(__x))
+ __x = 0;
+ _Tp __y = __rho * sin(__theta);
+ if (std::isnan(__y))
+ __y = 0;
+ return std::complex<_Tp>(__x, __y);
+}
+
+// log
+
+template <class _Tp> std::complex<_Tp> log(const std::complex<_Tp> &__x) {
+ return std::complex<_Tp>(log(abs(__x)), arg(__x));
+}
+
+// log10
+
+template <class _Tp> std::complex<_Tp> log10(const std::complex<_Tp> &__x) {
+ return log(__x) / log(_Tp(10));
+}
+
+// sqrt
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sqrt(const std::complex<_Tp> &__x) {
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(_Tp(INFINITY), __x.imag());
+ if (std::isinf(__x.real())) {
+ if (__x.real() > _Tp(0))
+ return std::complex<_Tp>(__x.real(), std::isnan(__x.imag())
+ ? __x.imag()
+ : copysign(_Tp(0), __x.imag()));
+ return std::complex<_Tp>(std::isnan(__x.imag()) ? __x.imag() : _Tp(0),
+ copysign(__x.real(), __x.imag()));
+ }
+ return polar(sqrt(abs(__x)), arg(__x) / _Tp(2));
+}
+
+// exp
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> exp(const std::complex<_Tp> &__x) {
+ _Tp __i = __x.imag();
+ if (std::isinf(__x.real())) {
+ if (__x.real() < _Tp(0)) {
+ if (!std::isfinite(__i))
+ __i = _Tp(1);
+ } else if (__i == 0 || !std::isfinite(__i)) {
+ if (std::isinf(__i))
+ __i = _Tp(NAN);
+ return std::complex<_Tp>(__x.real(), __i);
+ }
+ } else if (std::isnan(__x.real()) && __x.imag() == 0)
+ return __x;
+ _Tp __e = exp(__x.real());
+ return std::complex<_Tp>(__e * cos(__i), __e * sin(__i));
+}
+
+// pow
+
+template <class _Tp>
+std::complex<_Tp> pow(const std::complex<_Tp> &__x,
+ const std::complex<_Tp> &__y) {
+ return exp(__y * log(__x));
+}
+
+// __sqr, computes pow(x, 2)
+
+template <class _Tp> std::complex<_Tp> __sqr(const std::complex<_Tp> &__x) {
+ return std::complex<_Tp>((__x.real() - __x.imag()) *
+ (__x.real() + __x.imag()),
+ _Tp(2) * __x.real() * __x.imag());
+}
+
+// asinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asinh(const std::complex<_Tp> &__x) {
+ const _Tp __pi(atan2(+0., -0.));
+ if (std::isinf(__x.real())) {
+ if (std::isnan(__x.imag()))
+ return __x;
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(__x.real(),
+ copysign(__pi * _Tp(0.25), __x.imag()));
+ return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+ }
+ if (std::isnan(__x.real())) {
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(__x.imag(), __x.real());
+ if (__x.imag() == 0)
+ return __x;
+ return std::complex<_Tp>(__x.real(), __x.real());
+ }
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(copysign(__x.imag(), __x.real()),
+ copysign(__pi / _Tp(2), __x.imag()));
+ std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) + _Tp(1)));
+ return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+ copysign(__z.imag(), __x.imag()));
+}
+
+// acosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acosh(const std::complex<_Tp> &__x) {
+ const _Tp __pi(atan2(+0., -0.));
+ if (std::isinf(__x.real())) {
+ if (std::isnan(__x.imag()))
+ return std::complex<_Tp>(abs(__x.real()), __x.imag());
+ if (std::isinf(__x.imag())) {
+ if (__x.real() > 0)
+ return std::complex<_Tp>(__x.real(),
+ copysign(__pi * _Tp(0.25), __x.imag()));
+ else
+ return std::complex<_Tp>(-__x.real(),
+ copysign(__pi * _Tp(0.75), __x.imag()));
+ }
+ if (__x.real() < 0)
+ return std::complex<_Tp>(-__x.real(), copysign(__pi, __x.imag()));
+ return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+ }
+ if (std::isnan(__x.real())) {
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(abs(__x.imag()), __x.real());
+ return std::complex<_Tp>(__x.real(), __x.real());
+ }
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(abs(__x.imag()),
+ copysign(__pi / _Tp(2), __x.imag()));
+ std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+ return std::complex<_Tp>(copysign(__z.real(), _Tp(0)),
+ copysign(__z.imag(), __x.imag()));
+}
+
+// atanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atanh(const std::complex<_Tp> &__x) {
+ const _Tp __pi(atan2(+0., -0.));
+ if (std::isinf(__x.imag())) {
+ return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+ copysign(__pi / _Tp(2), __x.imag()));
+ }
+ if (std::isnan(__x.imag())) {
+ if (std::isinf(__x.real()) || __x.real() == 0)
+ return std::complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag());
+ return std::complex<_Tp>(__x.imag(), __x.imag());
+ }
+ if (std::isnan(__x.real())) {
+ return std::complex<_Tp>(__x.real(), __x.real());
+ }
+ if (std::isinf(__x.real())) {
+ return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+ copysign(__pi / _Tp(2), __x.imag()));
+ }
+ if (abs(__x.real()) == _Tp(1) && __x.imag() == _Tp(0)) {
+ return std::complex<_Tp>(copysign(_Tp(INFINITY), __x.real()),
+ copysign(_Tp(0), __x.imag()));
+ }
+ std::complex<_Tp> __z = log((_Tp(1) + __x) / (_Tp(1) - __x)) / _Tp(2);
+ return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+ copysign(__z.imag(), __x.imag()));
+}
+
+// sinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sinh(const std::complex<_Tp> &__x) {
+ if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+ return std::complex<_Tp>(__x.real(), _Tp(NAN));
+ if (__x.real() == 0 && !std::isfinite(__x.imag()))
+ return std::complex<_Tp>(__x.real(), _Tp(NAN));
+ if (__x.imag() == 0 && !std::isfinite(__x.real()))
+ return __x;
+ return std::complex<_Tp>(sinh(__x.real()) * cos(__x.imag()),
+ cosh(__x.real()) * sin(__x.imag()));
+}
+
+// cosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> cosh(const std::complex<_Tp> &__x) {
+ if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+ return std::complex<_Tp>(abs(__x.real()), _Tp(NAN));
+ if (__x.real() == 0 && !std::isfinite(__x.imag()))
+ return std::complex<_Tp>(_Tp(NAN), __x.real());
+ if (__x.real() == 0 && __x.imag() == 0)
+ return std::complex<_Tp>(_Tp(1), __x.imag());
+ if (__x.imag() == 0 && !std::isfinite(__x.real()))
+ return std::complex<_Tp>(abs(__x.real()), __x.imag());
+ return std::complex<_Tp>(cosh(__x.real()) * cos(__x.imag()),
+ sinh(__x.real()) * sin(__x.imag()));
+}
+
+// tanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tanh(const std::complex<_Tp> &__x) {
+ if (std::isinf(__x.real())) {
+ if (!std::isfinite(__x.imag()))
+ return std::complex<_Tp>(_Tp(1), _Tp(0));
+ return std::complex<_Tp>(_Tp(1),
+ copysign(_Tp(0), sin(_Tp(2) * __x.imag())));
+ }
+ if (std::isnan(__x.real()) && __x.imag() == 0)
+ return __x;
+ _Tp __2r(_Tp(2) * __x.real());
+ _Tp __2i(_Tp(2) * __x.imag());
+ _Tp __d(cosh(__2r) + cos(__2i));
+ _Tp __2rsh(sinh(__2r));
+ if (std::isinf(__2rsh) && std::isinf(__d))
+ return std::complex<_Tp>(__2rsh > _Tp(0) ? _Tp(1) : _Tp(-1),
+ __2i > _Tp(0) ? _Tp(0) : _Tp(-0.));
+ return std::complex<_Tp>(__2rsh / __d, sin(__2i) / __d);
+}
+
+// asin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asin(const std::complex<_Tp> &__x) {
+ std::complex<_Tp> __z = asinh(complex<_Tp>(-__x.imag(), __x.real()));
+ return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// acos
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acos(const std::complex<_Tp> &__x) {
+ const _Tp __pi(atan2(+0., -0.));
+ if (std::isinf(__x.real())) {
+ if (std::isnan(__x.imag()))
+ return std::complex<_Tp>(__x.imag(), __x.real());
+ if (std::isinf(__x.imag())) {
+ if (__x.real() < _Tp(0))
+ return std::complex<_Tp>(_Tp(0.75) * __pi, -__x.imag());
+ return std::complex<_Tp>(_Tp(0.25) * __pi, -__x.imag());
+ }
+ if (__x.real() < _Tp(0))
+ return std::complex<_Tp>(__pi,
+ signbit(__x.imag()) ? -__x.real() : __x.real());
+ return std::complex<_Tp>(_Tp(0),
+ signbit(__x.imag()) ? __x.real() : -__x.real());
+ }
+ if (std::isnan(__x.real())) {
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(__x.real(), -__x.imag());
+ return std::complex<_Tp>(__x.real(), __x.real());
+ }
+ if (std::isinf(__x.imag()))
+ return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+ if (__x.real() == 0 && (__x.imag() == 0 || isnan(__x.imag())))
+ return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+ std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+ if (signbit(__x.imag()))
+ return std::complex<_Tp>(abs(__z.imag()), abs(__z.real()));
+ return std::complex<_Tp>(abs(__z.imag()), -abs(__z.real()));
+}
+
+// atan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atan(const std::complex<_Tp> &__x) {
+ std::complex<_Tp> __z = atanh(complex<_Tp>(-__x.imag(), __x.real()));
+ return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// sin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sin(const std::complex<_Tp> &__x) {
+ std::complex<_Tp> __z = sinh(complex<_Tp>(-__x.imag(), __x.real()));
+ return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// cos
+
+template <class _Tp> std::complex<_Tp> cos(const std::complex<_Tp> &__x) {
+ return cosh(complex<_Tp>(-__x.imag(), __x.real()));
+}
+
+// tan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tan(const std::complex<_Tp> &__x) {
+ std::complex<_Tp> __z = tanh(complex<_Tp>(-__x.imag(), __x.real()));
+ return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+} // namespace std
+
+#endif
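These overloads let std::complex math inside OpenMP target regions resolve to the decomposed implementations above when the host standard library is not libc++. A hedged usage sketch, assuming an NVPTX offload toolchain (e.g. -fopenmp -fopenmp-targets=nvptx64); the function name is illustrative:

#include <complex>

double norm_of_sin(std::complex<double> z) {
  double out = 0.0;
#pragma omp target map(to : z) map(from : out)
  { out = std::norm(std::sin(z)); }
  return out;
}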
diff --git a/clang/lib/Headers/popcntintrin.h b/clang/lib/Headers/popcntintrin.h
index 312901014796..0aa94aecda5b 100644
--- a/clang/lib/Headers/popcntintrin.h
+++ b/clang/lib/Headers/popcntintrin.h
@@ -13,6 +13,12 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
/// Counts the number of bits in the source operand having a value of 1.
///
/// \headerfile <x86intrin.h>
@@ -23,7 +29,7 @@
/// An unsigned 32-bit integer operand.
/// \returns A 32-bit integer containing the number of bits with value 1 in the
/// source operand.
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
_mm_popcnt_u32(unsigned int __A)
{
return __builtin_popcount(__A);
@@ -40,7 +46,7 @@ _mm_popcnt_u32(unsigned int __A)
/// An unsigned 64-bit integer operand.
/// \returns A 64-bit integer containing the number of bits with value 1 in the
/// source operand.
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
_mm_popcnt_u64(unsigned long long __A)
{
return __builtin_popcountll(__A);
@@ -48,5 +54,6 @@ _mm_popcnt_u64(unsigned long long __A)
#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif /* __POPCNTINTRIN_H */
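Because the intrinsics are now constexpr in C++11 and later, population counts over constants can fold at compile time. A small sketch, assuming popcnt is enabled (e.g. -mpopcnt):

#include <immintrin.h>

#if defined(__cplusplus) && __cplusplus >= 201103L
static_assert(_mm_popcnt_u32(0x0000F00Fu) == 8, "folded at compile time");
#endif

int runtime_popcount(unsigned int x) { return _mm_popcnt_u32(x); }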
diff --git a/clang/lib/Headers/ppc_wrappers/smmintrin.h b/clang/lib/Headers/ppc_wrappers/smmintrin.h
index 56ef6ba76b06..64f0c761994d 100644
--- a/clang/lib/Headers/ppc_wrappers/smmintrin.h
+++ b/clang/lib/Headers/ppc_wrappers/smmintrin.h
@@ -78,6 +78,30 @@ extern __inline __m128i
return (__m128i)vec_sel((__v16qu)__A, (__v16qu)__B, __lmask);
}
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
+ __v16qi result = (__v16qi)__A;
+ result[__N & 0xf] = __D;
+ return (__m128i)result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
+ __v4si result = (__v4si)__A;
+ result[__N & 3] = __D;
+ return (__m128i)result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
+ __v2di result = (__v2di)__A;
+ result[__N & 1] = __D;
+ return (__m128i)result;
+}
+
#else
#include_next <smmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */
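The new PowerPC wrappers emulate the SSE4.1 insert forms by writing a single element of the vector in place, with the lane index masked to the valid range. A usage sketch, assuming a powerpc64 Linux target built with -DNO_WARN_X86_INTRINSICS:

#include <smmintrin.h>

__m128i set_lane2_to_42(__m128i v) {
  /* Replaces 32-bit lane 2; __N is masked with 3, so out-of-range indices wrap. */
  return _mm_insert_epi32(v, 42, 2);
}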
diff --git a/clang/lib/Headers/uintrintrin.h b/clang/lib/Headers/uintrintrin.h
new file mode 100644
index 000000000000..78aa8779c325
--- /dev/null
+++ b/clang/lib/Headers/uintrintrin.h
@@ -0,0 +1,150 @@
+/*===------------------ uintrintrin.h - UINTR intrinsics -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86GPRINTRIN_H
+#error "Never use <uintrintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef __UINTRINTRIN_H
+#define __UINTRINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("uintr")))
+
+#ifdef __x86_64__
+
+/// Clears the user interrupt flag (UIF). Its effect takes place immediately: a
+/// user interrupt cannot be delivered on the instruction boundary following
+/// CLUI. Can be executed only if CR4.UINT = 1, the logical processor is in
+/// 64-bit mode, and software is not executing inside an enclave; otherwise, it
+/// causes an invalid-opcode exception. Causes a transactional abort if
+/// executed inside a transactional region; the abort loads EAX as it would
+/// have had it been due to an execution of CLI.
+///
+/// \headerfile <x86gprintrin.h>
+///
+/// This intrinsic corresponds to the <c> CLUI </c> instruction.
+///
+/// \operation
+/// UIF := 0
+/// \endoperation
+static __inline__ void __DEFAULT_FN_ATTRS
+_clui (void)
+{
+ __builtin_ia32_clui();
+}
+
+/// Sets the user interrupt flag (UIF). Its effect takes place immediately; a
+/// user interrupt may be delivered on the instruction boundary following
+/// STUI. Can be executed only if CR4.UINT = 1, the logical processor is in
+/// 64-bit mode, and software is not executing inside an enclave; otherwise, it
+/// causes an invalid-opcode exception. Causes a transactional abort if
+/// executed inside a transactional region; the abort loads EAX as it would
+/// have had it been due to an execution of STI.
+///
+/// \headerfile <x86gprintrin.h>
+///
+/// This intrinsic corresponds to the <c> STUI </c> instruction.
+///
+/// \operation
+/// UIF := 1
+/// \endoperation
+static __inline__ void __DEFAULT_FN_ATTRS
+_stui (void)
+{
+ __builtin_ia32_stui();
+}
+
+/// Gets the current value of the user interrupt flag (UIF). Can be executed
+/// regardless of CPL and inside a transactional region. Can be executed only
+/// if CR4.UINT = 1, the logical processor is in 64-bit mode, and software is
+/// not executing inside an enclave; otherwise, it causes an invalid-opcode
+/// exception.
+///
+/// \headerfile <x86gprintrin.h>
+///
+/// This intrinsic corresponds to the <c> TESTUI </c> instruction.
+///
+/// \returns The current value of the user interrupt flag (UIF).
+///
+/// \operation
+/// CF := UIF
+/// ZF := 0
+/// AF := 0
+/// OF := 0
+/// PF := 0
+/// SF := 0
+/// dst := CF
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_testui (void)
+{
+ return __builtin_ia32_testui();
+}
+
+/// Sends an interprocessor user interrupt. Can be executed only if
+/// CR4.UINT = IA32_UINT_TT[0] = 1, the logical processor is in 64-bit mode,
+/// and software is not executing inside an enclave; otherwise, it causes an
+/// invalid-opcode exception. May be executed at any privilege level; all of
+/// its memory accesses are performed with supervisor privilege.
+///
+/// \headerfile <x86gprintrin.h>
+///
+/// This intrinsic corresponds to the <c> SENDUIPI </c> instruction.
+///
+/// \param __a
+/// Index of user-interrupt target table entry in user-interrupt target
+/// table.
+///
+/// \operation
+/// IF __a > UITTSZ
+/// GP (0)
+/// FI
+/// tempUITTE := MEM[UITTADDR + (__a<<4)]
+/// // tempUITTE must be valid, and can't have any reserved bit set
+/// IF (tempUITTE.V == 0 OR tempUITTE[7:1] != 0)
+/// GP (0)
+/// FI
+/// tempUPID := MEM[tempUITTE.UPIDADDR] // under lock
+/// // tempUPID can't have any reserved bit set
+/// IF (tempUPID[15:2] != 0 OR tempUPID[31:24] != 0)
+/// GP (0) // release lock
+/// FI
+/// tempUPID.PIR[tempUITTE.UV] := 1;
+/// IF (tempUPID.SN == 0 AND tempUPID.ON == 0)
+/// tempUPID.ON := 1
+/// sendNotify := 1
+/// ELSE
+/// sendNotify := 0
+/// FI
+/// MEM[tempUITTE.UPIDADDR] := tempUPID // release lock
+/// IF sendNotify == 1
+/// IF IA32_APIC_BASE[10] == 1 // local APIC is in x2APIC mode
+/// // send ordinary IPI with vector tempUPID.NV to 32-bit physical APIC
+/// // ID tempUPID.NDST
+/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST)
+/// ELSE
+/// // send ordinary IPI with vector tempUPID.NV to 8-bit physical APIC
+/// // ID tempUPID.NDST[15:8]
+/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8])
+/// FI
+/// FI
+/// \endoperation
+static __inline__ void __DEFAULT_FN_ATTRS
+_senduipi (unsigned long long __a)
+{
+ __builtin_ia32_senduipi(__a);
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __UINTRINTRIN_H */
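A hedged sketch of how the UIF intrinsics compose, assuming a 64-bit target built with -muintr and an environment where CR4.UINT is set (outside such an environment these raise #UD); the function name is illustrative:

#include <x86gprintrin.h>

#ifdef __x86_64__
unsigned char uif_critical_section(void) {
  _clui(); /* block delivery of user interrupts */
  /* ... code that must not be interrupted by user interrupts ... */
  _stui(); /* re-enable delivery */
  return _testui(); /* reads UIF back; expected to be 1 here */
}
#endif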
diff --git a/clang/lib/Headers/wasm_simd128.h b/clang/lib/Headers/wasm_simd128.h
index b78123834b64..ac88516ac924 100644
--- a/clang/lib/Headers/wasm_simd128.h
+++ b/clang/lib/Headers/wasm_simd128.h
@@ -18,8 +18,7 @@ typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
// Internal types determined by clang builtin definitions
typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef signed char __s8x16
+typedef signed char __i8x16
__attribute__((__vector_size__(16), __aligned__(16)));
typedef unsigned char __u8x16
__attribute__((__vector_size__(16), __aligned__(16)));
@@ -35,6 +34,13 @@ typedef unsigned long long __u64x2
typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef signed char __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned char __u8x8
+ __attribute__((__vector_size__(8), __aligned__(8)));
+typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned short __u16x4
+ __attribute__((__vector_size__(8), __aligned__(8)));
+
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("simd128"), \
__min_vector_width__(128)))
@@ -273,7 +279,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
(__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
#define wasm_u8x16_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
+ (__builtin_wasm_extract_lane_u_i8x16((__u8x16)(__a), __i))
#define wasm_i8x16_replace_lane(__a, __i, __b) \
((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
@@ -286,7 +292,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
(__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
#define wasm_u16x8_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
+ (__builtin_wasm_extract_lane_u_i16x8((__u16x8)(__a), __i))
#define wasm_i16x8_replace_lane(__a, __i, __b) \
((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
@@ -333,17 +339,17 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
v128_t __b) {
- return (v128_t)((__s8x16)__a == (__s8x16)__b);
+ return (v128_t)((__i8x16)__a == (__i8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
v128_t __b) {
- return (v128_t)((__s8x16)__a != (__s8x16)__b);
+ return (v128_t)((__i8x16)__a != (__i8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
v128_t __b) {
- return (v128_t)((__s8x16)__a < (__s8x16)__b);
+ return (v128_t)((__i8x16)__a < (__i8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
@@ -353,7 +359,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
v128_t __b) {
- return (v128_t)((__s8x16)__a > (__s8x16)__b);
+ return (v128_t)((__i8x16)__a > (__i8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
@@ -363,7 +369,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
v128_t __b) {
- return (v128_t)((__s8x16)__a <= (__s8x16)__b);
+ return (v128_t)((__i8x16)__a <= (__i8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
@@ -373,7 +379,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
v128_t __b) {
- return (v128_t)((__s8x16)__a >= (__s8x16)__b);
+ return (v128_t)((__i8x16)__a >= (__i8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
@@ -595,7 +601,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
int32_t __b) {
- return (v128_t)((__s8x16)__a >> __b);
+ return (v128_t)((__i8x16)__a >> __b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
@@ -616,8 +622,8 @@ wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
- (__i8x16)__b);
+ return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__u8x16)__a,
+ (__u8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
@@ -633,8 +639,8 @@ wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
- (__i8x16)__b);
+ return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__u8x16)__a,
+ (__u8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
@@ -644,7 +650,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
+ return (v128_t)__builtin_wasm_min_u_i8x16((__u8x16)__a, (__u8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
@@ -654,12 +660,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
+ return (v128_t)__builtin_wasm_max_u_i8x16((__u8x16)__a, (__u8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
+ return (v128_t)__builtin_wasm_avgr_u_i8x16((__u8x16)__a, (__u8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
@@ -706,8 +712,8 @@ wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
- (__i16x8)__b);
+ return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__u16x8)__a,
+ (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
@@ -723,8 +729,8 @@ wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
- (__i16x8)__b);
+ return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__u16x8)__a,
+ (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
@@ -739,7 +745,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
+ return (v128_t)__builtin_wasm_min_u_i16x8((__u16x8)__a, (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
@@ -749,12 +755,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
+ return (v128_t)__builtin_wasm_max_u_i16x8((__u16x8)__a, (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
+ return (v128_t)__builtin_wasm_avgr_u_i16x8((__u16x8)__a, (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
@@ -810,7 +816,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
+ return (v128_t)__builtin_wasm_min_u_i32x4((__u32x4)__a, (__u32x4)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
@@ -820,7 +826,7 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
+ return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
@@ -1071,8 +1077,8 @@ wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
- (__i16x8)__b);
+ return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__u16x8)__a,
+ (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1083,48 +1089,76 @@ wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
- (__i32x4)__b);
+ return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__u32x4)__a,
+ (__u32x4)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_widen_low_i8x16(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
+ return (v128_t) __builtin_convertvector(
+ (__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2],
+ ((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5],
+ ((__i8x16)__a)[6], ((__i8x16)__a)[7]},
+ __i16x8);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_widen_high_i8x16(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
+ return (v128_t) __builtin_convertvector(
+ (__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10],
+ ((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13],
+ ((__i8x16)__a)[14], ((__i8x16)__a)[15]},
+ __i16x8);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_widen_low_u8x16(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
+ return (v128_t) __builtin_convertvector(
+ (__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2],
+ ((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5],
+ ((__u8x16)__a)[6], ((__u8x16)__a)[7]},
+ __u16x8);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_widen_high_u8x16(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
+ return (v128_t) __builtin_convertvector(
+ (__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10],
+ ((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13],
+ ((__u8x16)__a)[14], ((__u8x16)__a)[15]},
+ __u16x8);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i32x4_widen_low_i16x8(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
+ return (v128_t) __builtin_convertvector(
+ (__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2],
+ ((__i16x8)__a)[3]},
+ __i32x4);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i32x4_widen_high_i16x8(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
+ return (v128_t) __builtin_convertvector(
+ (__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6],
+ ((__i16x8)__a)[7]},
+ __i32x4);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i32x4_widen_low_u16x8(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
+ return (v128_t) __builtin_convertvector(
+ (__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2],
+ ((__u16x8)__a)[3]},
+ __u32x4);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i32x4_widen_high_u16x8(v128_t __a) {
- return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
+ return (v128_t) __builtin_convertvector(
+ (__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6],
+ ((__u16x8)__a)[7]},
+ __u32x4);
}
// Undefine helper macros
diff --git a/clang/lib/Headers/x86gprintrin.h b/clang/lib/Headers/x86gprintrin.h
new file mode 100644
index 000000000000..1fc6cab4b28f
--- /dev/null
+++ b/clang/lib/Headers/x86gprintrin.h
@@ -0,0 +1,23 @@
+/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86GPRINTRIN_H
+#define __X86GPRINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__HRESET__)
+#include <hresetintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__UINTR__)
+#include <uintrintrin.h>
+#endif
+
+#endif /* __X86GPRINTRIN_H */
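A hedged usage sketch for the new umbrella header: the guards include each sub-header unconditionally except for compilers that predefine feature macros only when the feature is enabled (MSVC mode, SCE targets). A hypothetical build such as `clang -mhreset -muintr` defines __HRESET__ and __UINTR__, so both sub-headers are pulled in:

#include <x86gprintrin.h> /* HRESET and UINTR intrinsics when enabled */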
diff --git a/clang/lib/Index/FileIndexRecord.cpp b/clang/lib/Index/FileIndexRecord.cpp
index 753bdf2ce21d..df18a9aed8b7 100644
--- a/clang/lib/Index/FileIndexRecord.cpp
+++ b/clang/lib/Index/FileIndexRecord.cpp
@@ -52,7 +52,7 @@ void FileIndexRecord::print(llvm::raw_ostream &OS) const {
<< ':' << PLoc.getColumn();
if (auto ND = dyn_cast<NamedDecl>(D)) {
- OS << ' ' << ND->getNameAsString();
+ OS << ' ' << ND->getDeclName();
}
OS << '\n';
diff --git a/clang/lib/Index/IndexBody.cpp b/clang/lib/Index/IndexBody.cpp
index 01cf559d7057..e4944fd0fc3b 100644
--- a/clang/lib/Index/IndexBody.cpp
+++ b/clang/lib/Index/IndexBody.cpp
@@ -391,11 +391,13 @@ public:
if (C->capturesThis() || C->capturesVLAType())
return true;
+ if (!base::TraverseStmt(Init))
+ return false;
+
if (C->capturesVariable() && IndexCtx.shouldIndexFunctionLocalSymbols())
return IndexCtx.handleReference(C->getCapturedVar(), C->getLocation(),
Parent, ParentDC, SymbolRoleSet());
- // FIXME: Lambda init-captures.
return true;
}
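With the traversal reordered, lambda init-captures are indexed too: the initializer is walked before the captured-variable handling, so a call like the hypothetical make_widget() below is now reported to the consumer:

auto fn = [w = make_widget()] { return w; }; // make_widget() gets indexed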
diff --git a/clang/lib/Index/IndexTypeSourceInfo.cpp b/clang/lib/Index/IndexTypeSourceInfo.cpp
index b9fc90040cfc..ec4ca23942ca 100644
--- a/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -8,6 +8,7 @@
#include "IndexingContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/ScopeExit.h"
using namespace clang;
using namespace index;
@@ -160,6 +161,26 @@ public:
return true;
}
+ bool TraverseTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) {
+ if (!WalkUpFromTemplateSpecializationTypeLoc(TL))
+ return false;
+ if (!TraverseTemplateName(TL.getTypePtr()->getTemplateName()))
+ return false;
+
+ // The relations we have to `Parent` do not apply to our template arguments,
+ // so clear them while visiting the args.
+ SmallVector<SymbolRelation, 3> SavedRelations = Relations;
+ Relations.clear();
+ auto ResetSavedRelations =
+ llvm::make_scope_exit([&] { this->Relations = SavedRelations; });
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ if (!TraverseTemplateArgumentLoc(TL.getArgLoc(I)))
+ return false;
+ }
+
+ return true;
+ }
+
bool VisitDeducedTemplateSpecializationTypeLoc(DeducedTemplateSpecializationTypeLoc TL) {
auto *T = TL.getTypePtr();
if (!T)
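The save/clear/restore idiom above relies on llvm::make_scope_exit to reinstate the relations on every exit path, including the early return. A minimal sketch of the same pattern, assuming a hypothetical doWork callback:

#include "llvm/ADT/ScopeExit.h"
#include <vector>

bool visitWithClearedState(std::vector<int> &Relations, bool (*doWork)()) {
  std::vector<int> Saved = Relations;
  Relations.clear();
  auto Restore = llvm::make_scope_exit([&] { Relations = Saved; });
  if (!doWork())
    return false; // Restore fires here as well as on the normal return.
  return true;
}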
diff --git a/clang/lib/Index/IndexingAction.cpp b/clang/lib/Index/IndexingAction.cpp
index e698c07133a9..4986303cac47 100644
--- a/clang/lib/Index/IndexingAction.cpp
+++ b/clang/lib/Index/IndexingAction.cpp
@@ -165,11 +165,20 @@ static void indexTranslationUnit(ASTUnit &Unit, IndexingContext &IndexCtx) {
static void indexPreprocessorMacros(const Preprocessor &PP,
IndexDataConsumer &DataConsumer) {
for (const auto &M : PP.macros())
- if (MacroDirective *MD = M.second.getLatest())
+ if (MacroDirective *MD = M.second.getLatest()) {
+ auto *MI = MD->getMacroInfo();
+ // When using modules, it may happen that we find #undef of a macro that
+ // was defined in another module. In such a case, MI may be nullptr, since
+ // we only look for macro definitions in the current TU. In that case,
+ // there is nothing to index.
+ if (!MI)
+ continue;
+
DataConsumer.handleMacroOccurrence(
M.first, MD->getMacroInfo(),
static_cast<unsigned>(index::SymbolRole::Definition),
MD->getLocation());
+ }
}
void index::indexASTUnit(ASTUnit &Unit, IndexDataConsumer &DataConsumer,
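A reproduction sketch for the guarded case (hypothetical module layout, built with -fmodules): the latest directive for the name in this TU is an #undef, while the definition lives in another module, so getMacroInfo() is null:

// Lib.h, part of module Lib:
//   #define LIB_FAST 1
// main.c:
//   #include "Lib.h"
//   #undef LIB_FAST  // latest MacroDirective in this TU; no local MacroInfo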
diff --git a/clang/lib/Index/SimpleFormatContext.h b/clang/lib/Index/SimpleFormatContext.h
deleted file mode 100644
index 17793154a3ae..000000000000
--- a/clang/lib/Index/SimpleFormatContext.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//===--- SimpleFormatContext.h ----------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-///
-/// Defines a utility class for use of clang-format in libclang
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
-#define LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
-
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/DiagnosticOptions.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Rewrite/Core/Rewriter.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace clang {
-namespace index {
-
-/// A small class to be used by libclang clients to format
-/// a declaration string in memory. This object is instantiated once
-/// and used each time a formatting is needed.
-class SimpleFormatContext {
-public:
- SimpleFormatContext(LangOptions Options)
- : DiagOpts(new DiagnosticOptions()),
- Diagnostics(new DiagnosticsEngine(new DiagnosticIDs, DiagOpts.get())),
- InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
- Files(FileSystemOptions(), InMemoryFileSystem),
- Sources(*Diagnostics, Files), Rewrite(Sources, Options) {
- Diagnostics->setClient(new IgnoringDiagConsumer, true);
- }
-
- FileID createInMemoryFile(StringRef Name, StringRef Content) {
- InMemoryFileSystem->addFile(Name, 0,
- llvm::MemoryBuffer::getMemBuffer(Content));
- const FileEntry *Entry = Files.getFile(Name);
- assert(Entry != nullptr);
- return Sources.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
- }
-
- std::string getRewrittenText(FileID ID) {
- std::string Result;
- llvm::raw_string_ostream OS(Result);
- Rewrite.getEditBuffer(ID).write(OS);
- OS.flush();
- return Result;
- }
-
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
- IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem;
- FileManager Files;
- SourceManager Sources;
- Rewriter Rewrite;
-};
-
-} // end namespace index
-} // end namespace clang
-
-#endif
diff --git a/clang/lib/Index/USRGeneration.cpp b/clang/lib/Index/USRGeneration.cpp
index 0d1e81219823..abaeb1a4232f 100644
--- a/clang/lib/Index/USRGeneration.cpp
+++ b/clang/lib/Index/USRGeneration.cpp
@@ -729,6 +729,9 @@ void USRGenerator::VisitType(QualType T) {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
diff --git a/clang/lib/IndexSerialization/SerializablePathCollection.cpp b/clang/lib/IndexSerialization/SerializablePathCollection.cpp
new file mode 100644
index 000000000000..34663738088e
--- /dev/null
+++ b/clang/lib/IndexSerialization/SerializablePathCollection.cpp
@@ -0,0 +1,91 @@
+//===--- SerializablePathCollection.cpp -- Index of paths -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/IndexSerialization/SerializablePathCollection.h"
+#include "llvm/Support/Path.h"
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::index;
+
+StringPool::StringOffsetSize StringPool::add(StringRef Str) {
+ const std::size_t Offset = Buffer.size();
+ Buffer += Str;
+ return StringPool::StringOffsetSize(Offset, Str.size());
+}
+
+size_t PathPool::addFilePath(RootDirKind Root,
+ const StringPool::StringOffsetSize &Dir,
+ StringRef Filename) {
+ FilePaths.emplace_back(DirPath(Root, Dir), Paths.add(Filename));
+ return FilePaths.size() - 1;
+}
+
+StringPool::StringOffsetSize PathPool::addDirPath(StringRef Dir) {
+ return Paths.add(Dir);
+}
+
+llvm::ArrayRef<PathPool::FilePath> PathPool::getFilePaths() const {
+ return FilePaths;
+}
+
+StringRef PathPool::getPaths() const { return Paths.getBuffer(); }
+
+SerializablePathCollection::SerializablePathCollection(
+ StringRef CurrentWorkDir, StringRef SysRoot, llvm::StringRef OutputFile)
+ : WorkDir(CurrentWorkDir),
+ SysRoot(llvm::sys::path::parent_path(SysRoot).empty() ? StringRef()
+ : SysRoot),
+ WorkDirPath(Paths.addDirPath(WorkDir)),
+ SysRootPath(Paths.addDirPath(SysRoot)),
+ OutputFilePath(Paths.addDirPath(OutputFile)) {}
+
+size_t SerializablePathCollection::tryStoreFilePath(const FileEntry &FE) {
+ auto FileIt = UniqueFiles.find(&FE);
+ if (FileIt != UniqueFiles.end())
+ return FileIt->second;
+
+ const auto Dir = tryStoreDirPath(sys::path::parent_path(FE.getName()));
+ const auto FileIdx =
+ Paths.addFilePath(Dir.Root, Dir.Path, sys::path::filename(FE.getName()));
+
+ UniqueFiles.try_emplace(&FE, FileIdx);
+ return FileIdx;
+}
+
+PathPool::DirPath SerializablePathCollection::tryStoreDirPath(StringRef Dir) {
+ // We don't want to strip separator if Dir is "/" - so we check size > 1.
+ while (Dir.size() > 1 && llvm::sys::path::is_separator(Dir.back()))
+ Dir = Dir.drop_back();
+
+ auto DirIt = UniqueDirs.find(Dir);
+ if (DirIt != UniqueDirs.end())
+ return DirIt->second;
+
+ const std::string OrigDir = Dir.str();
+
+ PathPool::RootDirKind Root = PathPool::RootDirKind::Regular;
+ if (!SysRoot.empty() && Dir.startswith(SysRoot) &&
+ llvm::sys::path::is_separator(Dir[SysRoot.size()])) {
+ Root = PathPool::RootDirKind::SysRoot;
+ Dir = Dir.drop_front(SysRoot.size());
+ } else if (!WorkDir.empty() && Dir.startswith(WorkDir) &&
+ llvm::sys::path::is_separator(Dir[WorkDir.size()])) {
+ Root = PathPool::RootDirKind::CurrentWorkDir;
+ Dir = Dir.drop_front(WorkDir.size());
+ }
+
+ if (Root != PathPool::RootDirKind::Regular) {
+ while (!Dir.empty() && llvm::sys::path::is_separator(Dir.front()))
+ Dir = Dir.drop_front();
+ }
+
+ PathPool::DirPath Result(Root, Paths.addDirPath(Dir));
+ UniqueDirs.try_emplace(OrigDir, Result);
+ return Result;
+}
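A worked example of tryStoreDirPath (hypothetical values): trailing separators are stripped, a sysroot or workdir prefix is replaced by a root kind, then leading separators are dropped:

//   SysRoot = "/sdk", WorkDir = "/home/me/build"
//   "/sdk/usr/include/"  -> (SysRoot,        "usr/include")
//   "/home/me/build/gen" -> (CurrentWorkDir, "gen")
//   "/opt/other"         -> (Regular,        "/opt/other")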
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp
index 1df28cc07209..99c92e91aad5 100644
--- a/clang/lib/Lex/HeaderSearch.cpp
+++ b/clang/lib/Lex/HeaderSearch.cpp
@@ -164,14 +164,39 @@ std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
return {};
}
+std::string HeaderSearch::getPrebuiltImplicitModuleFileName(Module *Module) {
+ const FileEntry *ModuleMap =
+ getModuleMap().getModuleMapFileForUniquing(Module);
+ StringRef ModuleName = Module->Name;
+ StringRef ModuleMapPath = ModuleMap->getName();
+ StringRef ModuleCacheHash = HSOpts->DisableModuleHash ? "" : getModuleHash();
+ for (const std::string &Dir : HSOpts->PrebuiltModulePaths) {
+ SmallString<256> CachePath(Dir);
+ llvm::sys::fs::make_absolute(CachePath);
+ llvm::sys::path::append(CachePath, ModuleCacheHash);
+ std::string FileName =
+ getCachedModuleFileNameImpl(ModuleName, ModuleMapPath, CachePath);
+ if (!FileName.empty() && getFileMgr().getFile(FileName))
+ return FileName;
+ }
+ return {};
+}
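Probe shape implied by the new function (a sketch; the exact name comes from getCachedModuleFileNameImpl): for each directory D in HSOpts->PrebuiltModulePaths,

//   D/<module-cache-hash>/<ModuleName>-<hash(ModuleMapPath)>.pcm  (default)
//   D/<ModuleName>.pcm                              (DisableModuleHash)

and the first candidate the FileManager can find wins.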
+
std::string HeaderSearch::getCachedModuleFileName(StringRef ModuleName,
StringRef ModuleMapPath) {
+ return getCachedModuleFileNameImpl(ModuleName, ModuleMapPath,
+ getModuleCachePath());
+}
+
+std::string HeaderSearch::getCachedModuleFileNameImpl(StringRef ModuleName,
+ StringRef ModuleMapPath,
+ StringRef CachePath) {
// If we don't have a module cache path or aren't supposed to use one, we
// can't do anything.
- if (getModuleCachePath().empty())
+ if (CachePath.empty())
return {};
- SmallString<256> Result(getModuleCachePath());
+ SmallString<256> Result(CachePath);
llvm::sys::fs::make_absolute(Result);
if (HSOpts->DisableModuleHash) {
@@ -765,8 +790,7 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// This is the header that MSVC's header search would have found.
ModuleMap::KnownHeader MSSuggestedModule;
- const FileEntry *MSFE_FE = nullptr;
- StringRef MSFE_Name;
+ Optional<FileEntryRef> MSFE;
// Unless disabled, check to see if the file is in the #includer's
// directory. This cannot be based on CurDir, because each includer could be
@@ -841,8 +865,7 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
if (Diags.isIgnored(diag::ext_pp_include_search_ms, IncludeLoc)) {
return FE;
} else {
- MSFE_FE = &FE->getFileEntry();
- MSFE_Name = FE->getName();
+ MSFE = FE;
if (SuggestedModule) {
MSSuggestedModule = *SuggestedModule;
*SuggestedModule = ModuleMap::KnownHeader();
@@ -854,9 +877,6 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
}
}
- Optional<FileEntryRef> MSFE(MSFE_FE ? FileEntryRef(MSFE_Name, *MSFE_FE)
- : Optional<FileEntryRef>());
-
CurDir = nullptr;
// If this is a system #include, ignore the user #include locs.
@@ -1167,12 +1187,12 @@ HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
HeaderFileInfo *HFI = &FileInfo[FE->getUID()];
// FIXME: Use a generation count to check whether this is really up to date.
if (ExternalSource && !HFI->Resolved) {
- HFI->Resolved = true;
auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
-
- HFI = &FileInfo[FE->getUID()];
- if (ExternalHFI.External)
- mergeHeaderFileInfo(*HFI, ExternalHFI);
+ if (ExternalHFI.IsValid) {
+ HFI->Resolved = true;
+ if (ExternalHFI.External)
+ mergeHeaderFileInfo(*HFI, ExternalHFI);
+ }
}
HFI->IsValid = true;
@@ -1199,12 +1219,12 @@ HeaderSearch::getExistingFileInfo(const FileEntry *FE,
if (!WantExternal && (!HFI->IsValid || HFI->External))
return nullptr;
if (!HFI->Resolved) {
- HFI->Resolved = true;
auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
-
- HFI = &FileInfo[FE->getUID()];
- if (ExternalHFI.External)
- mergeHeaderFileInfo(*HFI, ExternalHFI);
+ if (ExternalHFI.IsValid) {
+ HFI->Resolved = true;
+ if (ExternalHFI.External)
+ mergeHeaderFileInfo(*HFI, ExternalHFI);
+ }
}
} else if (FE->getUID() >= FileInfo.size()) {
return nullptr;
diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp
index a559ca3eac2b..34732b659771 100644
--- a/clang/lib/Lex/Lexer.cpp
+++ b/clang/lib/Lex/Lexer.cpp
@@ -13,7 +13,9 @@
#include "clang/Lex/Lexer.h"
#include "UnicodeCharSets.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -24,19 +26,16 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/Token.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/LLVM.h"
-#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/UnicodeCharRanges.h"
#include <algorithm>
@@ -125,18 +124,21 @@ void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
// Default to not keeping comments.
ExtendedTokenMode = 0;
+
+ NewLinePtr = nullptr;
}
/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process. This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
-Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
+Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
+ Preprocessor &PP)
: PreprocessorLexer(&PP, FID),
FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
LangOpts(PP.getLangOpts()) {
- InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
- InputFile->getBufferEnd());
+ InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
+ InputFile.getBufferEnd());
resetExtendedTokenMode();
}
@@ -156,10 +158,10 @@ Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
-Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
+Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
const SourceManager &SM, const LangOptions &langOpts)
- : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile->getBufferStart(),
- FromFile->getBufferStart(), FromFile->getBufferEnd()) {}
+ : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
+ FromFile.getBufferStart(), FromFile.getBufferEnd()) {}
void Lexer::resetExtendedTokenMode() {
assert(PP && "Cannot reset token mode without a preprocessor");
@@ -192,7 +194,7 @@ Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
// Create the lexer as if we were going to lex the file normally.
FileID SpellingFID = SM.getFileID(SpellingLoc);
- const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID);
+ llvm::MemoryBufferRef InputFile = SM.getBufferOrFake(SpellingFID);
Lexer *L = new Lexer(SpellingFID, InputFile, PP);
// Now that the lexer is created, change the start/end locations so that we
@@ -2197,6 +2199,15 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
unsigned char Char = *CurPtr;
+ const char *lastNewLine = nullptr;
+ auto setLastNewLine = [&](const char *Ptr) {
+ lastNewLine = Ptr;
+ if (!NewLinePtr)
+ NewLinePtr = Ptr;
+ };
+ if (SawNewline)
+ setLastNewLine(CurPtr - 1);
+
// Skip consecutive spaces efficiently.
while (true) {
// Skip horizontal whitespace very aggressively.
@@ -2214,6 +2225,8 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
}
// OK, but handle newline.
+ if (*CurPtr == '\n')
+ setLastNewLine(CurPtr);
SawNewline = true;
Char = *++CurPtr;
}
@@ -2237,6 +2250,12 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
if (SawNewline) {
Result.setFlag(Token::StartOfLine);
TokAtPhysicalStartOfLine = true;
+
+ if (NewLinePtr && lastNewLine && NewLinePtr != lastNewLine && PP) {
+ if (auto *Handler = PP->getEmptylineHandler())
+ Handler->HandleEmptyline(SourceRange(getSourceLocation(NewLinePtr + 1),
+ getSourceLocation(lastNewLine)));
+ }
}
BufferPtr = CurPtr;
@@ -2377,7 +2396,7 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
// contribute to another token), it isn't needed for correctness. Note that
// this is ok even in KeepWhitespaceMode, because we would have returned the
// comment above in that mode.
- ++CurPtr;
+ NewLinePtr = CurPtr++;
// The next returned token is at the start of the line.
Result.setFlag(Token::StartOfLine);
@@ -3211,6 +3230,9 @@ LexNextToken:
char Char = getAndAdvanceChar(CurPtr, Result);
tok::TokenKind Kind;
+ if (!isVerticalWhitespace(Char))
+ NewLinePtr = nullptr;
+
switch (Char) {
case 0: // Null.
// Found end of file?
@@ -3265,6 +3287,7 @@ LexNextToken:
// Since we consumed a newline, we are back at the start of a line.
IsAtStartOfLine = true;
IsAtPhysicalStartOfLine = true;
+ NewLinePtr = CurPtr - 1;
Kind = tok::eod;
break;
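The NewLinePtr bookkeeping above drives the new empty-line callback. A hedged client sketch (EmptylineHandler and HandleEmptyline match the hunks; the registration setter on Preprocessor is assumed):

#include "clang/Lex/Preprocessor.h"

class BlankLineCounter : public clang::EmptylineHandler {
public:
  unsigned Runs = 0;
  // Range covers the run of consecutive empty lines that was skipped.
  void HandleEmptyline(clang::SourceRange Range) override { ++Runs; }
};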
diff --git a/clang/lib/Lex/LiteralSupport.cpp b/clang/lib/Lex/LiteralSupport.cpp
index eb16bc8c7da2..6c3cdbdf6492 100644
--- a/clang/lib/Lex/LiteralSupport.cpp
+++ b/clang/lib/Lex/LiteralSupport.cpp
@@ -1373,9 +1373,9 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
if (isWide())
PP.Diag(Loc, diag::warn_extraneous_char_constant);
else if (isAscii() && NumCharsSoFar == 4)
- PP.Diag(Loc, diag::ext_four_char_character_literal);
+ PP.Diag(Loc, diag::warn_four_char_character_literal);
else if (isAscii())
- PP.Diag(Loc, diag::ext_multichar_character_literal);
+ PP.Diag(Loc, diag::warn_multichar_character_literal);
else
PP.Diag(Loc, diag::err_multichar_utf_character_literal);
IsMultiChar = true;
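The re-categorized diagnostics fire on ordinary multi-character constants, e.g.:

int tag  = 'FOUR'; // now warn_four_char_character_literal
int pair = 'ab';   // now warn_multichar_character_literal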
diff --git a/clang/lib/Lex/ModuleMap.cpp b/clang/lib/Lex/ModuleMap.cpp
index bcdc5b8062a0..bbda1f15a702 100644
--- a/clang/lib/Lex/ModuleMap.cpp
+++ b/clang/lib/Lex/ModuleMap.cpp
@@ -171,23 +171,23 @@ static void appendSubframeworkPaths(Module *Mod,
llvm::sys::path::append(Path, "Frameworks", Paths[I-1] + ".framework");
}
-const FileEntry *ModuleMap::findHeader(
+Optional<FileEntryRef> ModuleMap::findHeader(
Module *M, const Module::UnresolvedHeaderDirective &Header,
SmallVectorImpl<char> &RelativePathName, bool &NeedsFramework) {
// Search for the header file within the module's home directory.
auto *Directory = M->Directory;
SmallString<128> FullPathName(Directory->getName());
- auto GetFile = [&](StringRef Filename) -> const FileEntry * {
- auto File = SourceMgr.getFileManager().getFile(Filename);
- if (!File ||
- (Header.Size && (*File)->getSize() != *Header.Size) ||
- (Header.ModTime && (*File)->getModificationTime() != *Header.ModTime))
- return nullptr;
+ auto GetFile = [&](StringRef Filename) -> Optional<FileEntryRef> {
+ auto File =
+ expectedToOptional(SourceMgr.getFileManager().getFileRef(Filename));
+ if (!File || (Header.Size && File->getSize() != *Header.Size) ||
+ (Header.ModTime && File->getModificationTime() != *Header.ModTime))
+ return None;
return *File;
};
- auto GetFrameworkFile = [&]() -> const FileEntry * {
+ auto GetFrameworkFile = [&]() -> Optional<FileEntryRef> {
unsigned FullPathLength = FullPathName.size();
appendSubframeworkPaths(M, RelativePathName);
unsigned RelativePathLength = RelativePathName.size();
@@ -195,7 +195,7 @@ const FileEntry *ModuleMap::findHeader(
// Check whether this file is in the public headers.
llvm::sys::path::append(RelativePathName, "Headers", Header.FileName);
llvm::sys::path::append(FullPathName, RelativePathName);
- if (auto *File = GetFile(FullPathName))
+ if (auto File = GetFile(FullPathName))
return File;
// Check whether this file is in the private headers.
@@ -227,7 +227,7 @@ const FileEntry *ModuleMap::findHeader(
// Lookup for normal headers.
llvm::sys::path::append(RelativePathName, Header.FileName);
llvm::sys::path::append(FullPathName, RelativePathName);
- auto *NormalHdrFile = GetFile(FullPathName);
+ auto NormalHdrFile = GetFile(FullPathName);
if (!NormalHdrFile && Directory->getName().endswith(".framework")) {
// The lack of 'framework' keyword in a module declaration it's a simple
@@ -241,7 +241,7 @@ const FileEntry *ModuleMap::findHeader(
<< Header.FileName << M->getFullModuleName();
NeedsFramework = true;
}
- return nullptr;
+ return None;
}
return NormalHdrFile;
@@ -251,18 +251,18 @@ void ModuleMap::resolveHeader(Module *Mod,
const Module::UnresolvedHeaderDirective &Header,
bool &NeedsFramework) {
SmallString<128> RelativePathName;
- if (const FileEntry *File =
+ if (Optional<FileEntryRef> File =
findHeader(Mod, Header, RelativePathName, NeedsFramework)) {
if (Header.IsUmbrella) {
- const DirectoryEntry *UmbrellaDir = File->getDir();
+ const DirectoryEntry *UmbrellaDir = &File->getDir().getDirEntry();
if (Module *UmbrellaMod = UmbrellaDirs[UmbrellaDir])
Diags.Report(Header.FileNameLoc, diag::err_mmap_umbrella_clash)
<< UmbrellaMod->getFullModuleName();
else
// Record this umbrella header.
- setUmbrellaHeader(Mod, File, RelativePathName.str());
+ setUmbrellaHeader(Mod, *File, RelativePathName.str());
} else {
- Module::Header H = {std::string(RelativePathName.str()), File};
+ Module::Header H = {std::string(RelativePathName.str()), *File};
if (Header.Kind == Module::HK_Excluded)
excludeHeader(Mod, H);
else
@@ -300,7 +300,7 @@ bool ModuleMap::resolveAsBuiltinHeader(
// supplied by Clang. Find that builtin header.
SmallString<128> Path;
llvm::sys::path::append(Path, BuiltinIncludeDir->getName(), Header.FileName);
- auto File = SourceMgr.getFileManager().getFile(Path);
+ auto File = SourceMgr.getFileManager().getOptionalFileRef(Path);
if (!File)
return false;
@@ -1012,7 +1012,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// Look for an umbrella header.
SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
llvm::sys::path::append(UmbrellaName, "Headers", ModuleName + ".h");
- auto UmbrellaHeader = FileMgr.getFile(UmbrellaName);
+ auto UmbrellaHeader = FileMgr.getOptionalFileRef(UmbrellaName);
// FIXME: If there's no umbrella header, we could probably scan the
// framework to load *everything*. But, it's not clear that this is a good
@@ -1121,23 +1121,21 @@ Module *ModuleMap::createShadowedModule(StringRef Name, bool IsFramework,
return Result;
}
-void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
+void ModuleMap::setUmbrellaHeader(Module *Mod, FileEntryRef UmbrellaHeader,
Twine NameAsWritten) {
Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
- Mod->Umbrella = UmbrellaHeader;
- Mod->HasUmbrellaDir = false;
+ Mod->Umbrella = &UmbrellaHeader.getMapEntry();
Mod->UmbrellaAsWritten = NameAsWritten.str();
- UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
+ UmbrellaDirs[UmbrellaHeader.getDir()] = Mod;
// Notify callbacks that we just added a new header.
for (const auto &Cb : Callbacks)
Cb->moduleMapAddUmbrellaHeader(&SourceMgr.getFileManager(), UmbrellaHeader);
}
-void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
+void ModuleMap::setUmbrellaDir(Module *Mod, DirectoryEntryRef UmbrellaDir,
Twine NameAsWritten) {
- Mod->Umbrella = UmbrellaDir;
- Mod->HasUmbrellaDir = true;
+ Mod->Umbrella = &UmbrellaDir.getMapEntry();
Mod->UmbrellaAsWritten = NameAsWritten.str();
UmbrellaDirs[UmbrellaDir] = Mod;
}
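A module map sketch exercising both setters above (hypothetical names): an umbrella header for one module and an umbrella directory for another:

module Foo {
  umbrella header "Foo.h"
  module * { export * }
}
module Bar {
  umbrella "BarHeaders"
  module * { export * }
}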
@@ -1903,18 +1901,16 @@ void ModuleMapParser::parseModuleDecl() {
continue;
}
- if (ActiveModule) {
- Diags.Report(Id[I].second, diag::err_mmap_missing_module_qualified)
- << Id[I].first
- << ActiveModule->getTopLevelModule()->getFullModuleName();
- } else {
- Diags.Report(Id[I].second, diag::err_mmap_expected_module_name);
- }
+ Diags.Report(Id[I].second, diag::err_mmap_missing_parent_module)
+ << Id[I].first << (ActiveModule != nullptr)
+ << (ActiveModule
+ ? ActiveModule->getTopLevelModule()->getFullModuleName()
+ : "");
HadError = true;
- return;
}
- if (ModuleMapFile != Map.getContainingModuleMapFile(TopLevelModule)) {
+ if (TopLevelModule &&
+ ModuleMapFile != Map.getContainingModuleMapFile(TopLevelModule)) {
assert(ModuleMapFile != Map.getModuleMapFileForUniquing(TopLevelModule) &&
"submodule defined in same file as 'module *' that allowed its "
"top-level module");
@@ -2420,15 +2416,15 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Look for this file.
- const DirectoryEntry *Dir = nullptr;
+ Optional<DirectoryEntryRef> Dir;
if (llvm::sys::path::is_absolute(DirName)) {
- if (auto D = SourceMgr.getFileManager().getDirectory(DirName))
+ if (auto D = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName))
Dir = *D;
} else {
SmallString<128> PathName;
PathName = Directory->getName();
llvm::sys::path::append(PathName, DirName);
- if (auto D = SourceMgr.getFileManager().getDirectory(PathName))
+ if (auto D = SourceMgr.getFileManager().getOptionalDirectoryRef(PathName))
Dir = *D;
}
@@ -2449,7 +2445,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
SourceMgr.getFileManager().getVirtualFileSystem();
for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
- if (auto FE = SourceMgr.getFileManager().getFile(I->path())) {
+ if (auto FE = SourceMgr.getFileManager().getOptionalFileRef(I->path())) {
Module::Header Header = {std::string(I->path()), *FE};
Headers.push_back(std::move(Header));
}
@@ -2463,7 +2459,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
return;
}
- if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
+ if (Module *OwningModule = Map.UmbrellaDirs[*Dir]) {
Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
<< OwningModule->getFullModuleName();
HadError = true;
@@ -2471,7 +2467,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Record this umbrella directory.
- Map.setUmbrellaDir(ActiveModule, Dir, DirName);
+ Map.setUmbrellaDir(ActiveModule, *Dir, DirName);
}
/// Parse a module export declaration.
@@ -3006,7 +3002,7 @@ bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
}
assert(Target && "Missing target information");
- const llvm::MemoryBuffer *Buffer = SourceMgr.getBuffer(ID);
+ llvm::Optional<llvm::MemoryBufferRef> Buffer = SourceMgr.getBufferOrNone(ID);
if (!Buffer)
return ParsedModuleMap[File] = true;
assert((!Offset || *Offset <= Buffer->getBufferSize()) &&
diff --git a/clang/lib/Lex/PPDirectives.cpp b/clang/lib/Lex/PPDirectives.cpp
index 053ef1d2dd18..d6b03d85913d 100644
--- a/clang/lib/Lex/PPDirectives.cpp
+++ b/clang/lib/Lex/PPDirectives.cpp
@@ -379,8 +379,12 @@ Optional<unsigned> Preprocessor::getSkippedRangeForExcludedConditionalBlock(
std::pair<FileID, unsigned> HashFileOffset =
SourceMgr.getDecomposedLoc(HashLoc);
- const llvm::MemoryBuffer *Buf = SourceMgr.getBuffer(HashFileOffset.first);
- auto It = ExcludedConditionalDirectiveSkipMappings->find(Buf);
+ Optional<llvm::MemoryBufferRef> Buf =
+ SourceMgr.getBufferOrNone(HashFileOffset.first);
+ if (!Buf)
+ return None;
+ auto It =
+ ExcludedConditionalDirectiveSkipMappings->find(Buf->getBufferStart());
if (It == ExcludedConditionalDirectiveSkipMappings->end())
return None;
@@ -2057,7 +2061,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// some directives (e.g. #endif of a header guard) will never be seen.
// Since this will lead to confusing errors, avoid the inclusion.
if (Action == Enter && File && PreambleConditionalStack.isRecording() &&
- SourceMgr.isMainFile(*File)) {
+ SourceMgr.isMainFile(File->getFileEntry())) {
Diag(FilenameTok.getLocation(),
diag::err_pp_including_mainfile_in_preamble);
return {ImportAction::None};
@@ -2397,7 +2401,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
diag::ext_variadic_macro);
// OpenCL v1.2 s6.9.e: variadic macros are not supported.
- if (LangOpts.OpenCL) {
+ if (LangOpts.OpenCL && !LangOpts.OpenCLCPlusPlus) {
Diag(Tok, diag::ext_pp_opencl_variadic_macros);
}
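With this guard, C++ for OpenCL (-cl-std=clc++) no longer emits the extension diagnostic for a variadic macro, while plain OpenCL C still does per v1.2 s6.9.e:

#define DBG(fmt, ...) printf(fmt, __VA_ARGS__)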
diff --git a/clang/lib/Lex/PPLexerChange.cpp b/clang/lib/Lex/PPLexerChange.cpp
index b7c7e2693ef1..b979b965f46a 100644
--- a/clang/lib/Lex/PPLexerChange.cpp
+++ b/clang/lib/Lex/PPLexerChange.cpp
@@ -11,16 +11,16 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/Path.h"
using namespace clang;
@@ -73,10 +73,9 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
MaxIncludeStackDepth = IncludeMacroStack.size();
// Get the MemoryBuffer for this FID, if it fails, we fail.
- bool Invalid = false;
- const llvm::MemoryBuffer *InputFile =
- getSourceManager().getBuffer(FID, Loc, &Invalid);
- if (Invalid) {
+ llvm::Optional<llvm::MemoryBufferRef> InputFile =
+ getSourceManager().getBufferOrNone(FID, Loc);
+ if (!InputFile) {
SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
Diag(Loc, diag::err_pp_error_opening_file)
<< std::string(SourceMgr.getBufferName(FileStart)) << "";
@@ -90,7 +89,7 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
CodeCompletionFileLoc.getLocWithOffset(CodeCompletionOffset);
}
- EnterSourceFileWithLexer(new Lexer(FID, InputFile, *this), CurDir);
+ EnterSourceFileWithLexer(new Lexer(FID, *InputFile, *this), CurDir);
return false;
}
@@ -263,10 +262,12 @@ static void collectAllSubModulesWithUmbrellaHeader(
}
void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
- assert(Mod.getUmbrellaHeader() && "Module must use umbrella header");
- SourceLocation StartLoc =
- SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
- if (getDiagnostics().isIgnored(diag::warn_uncovered_module_header, StartLoc))
+ const Module::Header &UmbrellaHeader = Mod.getUmbrellaHeader();
+ assert(UmbrellaHeader.Entry && "Module must use umbrella header");
+ const FileID &File = SourceMgr.translateFile(UmbrellaHeader.Entry);
+ SourceLocation ExpectedHeadersLoc = SourceMgr.getLocForEndOfFile(File);
+ if (getDiagnostics().isIgnored(diag::warn_uncovered_module_header,
+ ExpectedHeadersLoc))
return;
ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
@@ -291,7 +292,7 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
// Find the relative path that would access this header.
SmallString<128> RelativePath;
computeRelativePath(FileMgr, Dir, *Header, RelativePath);
- Diag(StartLoc, diag::warn_uncovered_module_header)
+ Diag(ExpectedHeadersLoc, diag::warn_uncovered_module_header)
<< Mod.getFullModuleName() << RelativePath;
}
}
diff --git a/clang/lib/Lex/PPMacroExpansion.cpp b/clang/lib/Lex/PPMacroExpansion.cpp
index 4908594d6081..43d31d6c5732 100644
--- a/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/clang/lib/Lex/PPMacroExpansion.cpp
@@ -345,7 +345,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident_Pragma = RegisterBuiltinMacro(*this, "_Pragma");
// C++ Standing Document Extensions.
- if (LangOpts.CPlusPlus)
+ if (getLangOpts().CPlusPlus)
Ident__has_cpp_attribute =
RegisterBuiltinMacro(*this, "__has_cpp_attribute");
else
@@ -357,7 +357,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__TIMESTAMP__ = RegisterBuiltinMacro(*this, "__TIMESTAMP__");
// Microsoft Extensions.
- if (LangOpts.MicrosoftExt) {
+ if (getLangOpts().MicrosoftExt) {
Ident__identifier = RegisterBuiltinMacro(*this, "__identifier");
Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
} else {
@@ -371,7 +371,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__has_extension = RegisterBuiltinMacro(*this, "__has_extension");
Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
Ident__has_attribute = RegisterBuiltinMacro(*this, "__has_attribute");
- if (!LangOpts.CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
Ident__has_c_attribute = RegisterBuiltinMacro(*this, "__has_c_attribute");
else
Ident__has_c_attribute = nullptr;
@@ -389,7 +389,7 @@ void Preprocessor::RegisterBuiltinMacros() {
// Modules.
Ident__building_module = RegisterBuiltinMacro(*this, "__building_module");
- if (!LangOpts.CurrentModule.empty())
+ if (!getLangOpts().CurrentModule.empty())
Ident__MODULE__ = RegisterBuiltinMacro(*this, "__MODULE__");
else
Ident__MODULE__ = nullptr;
@@ -889,10 +889,10 @@ MacroArgs *Preprocessor::ReadMacroCallArgumentList(Token &MacroName,
// Empty arguments are standard in C99 and C++0x, and are supported as an
// extension in other modes.
- if (ArgTokens.size() == ArgTokenStart && !LangOpts.C99)
- Diag(Tok, LangOpts.CPlusPlus11 ?
- diag::warn_cxx98_compat_empty_fnmacro_arg :
- diag::ext_empty_fnmacro_arg);
+ if (ArgTokens.size() == ArgTokenStart && !getLangOpts().C99)
+ Diag(Tok, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_fnmacro_arg
+ : diag::ext_empty_fnmacro_arg);
// Add a marker EOF token to the end of the token list for this argument.
Token EOFTok;
@@ -1628,7 +1628,6 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
- const LangOptions &LangOpts = getLangOpts();
if (!II)
return false;
else if (II->getBuiltinID() != 0) {
@@ -1664,8 +1663,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
} else {
return llvm::StringSwitch<bool>(II->getName())
// Report builtin templates as being builtins.
- .Case("__make_integer_seq", LangOpts.CPlusPlus)
- .Case("__type_pack_element", LangOpts.CPlusPlus)
+ .Case("__make_integer_seq", getLangOpts().CPlusPlus)
+ .Case("__type_pack_element", getLangOpts().CPlusPlus)
// Likewise for some builtin preprocessor macros.
// FIXME: This is inconsistent; we usually suggest detecting
// builtin macros via #ifdef. Don't add more cases here.
@@ -1694,8 +1693,14 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
- return II ? hasAttribute(AttrSyntax::Declspec, nullptr, II,
- getTargetInfo(), getLangOpts()) : 0;
+ if (II) {
+ const LangOptions &LangOpts = getLangOpts();
+ return LangOpts.DeclSpecKeyword &&
+ hasAttribute(AttrSyntax::Declspec, nullptr, II,
+ getTargetInfo(), LangOpts);
+ }
+
+ return false;
});
} else if (II == Ident__has_cpp_attribute ||
II == Ident__has_c_attribute) {
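After this change __has_declspec_attribute evaluates to 0 unless __declspec is actually a keyword (-fdeclspec, or MS-compatible modes), so the usual guard degrades cleanly:

#if defined(__has_declspec_attribute) && __has_declspec_attribute(dllexport)
#define DLL_EXPORT __declspec(dllexport)
#else
#define DLL_EXPORT
#endif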
diff --git a/clang/lib/Lex/Pragma.cpp b/clang/lib/Lex/Pragma.cpp
index b512a547de7d..a05df060813e 100644
--- a/clang/lib/Lex/Pragma.cpp
+++ b/clang/lib/Lex/Pragma.cpp
@@ -1356,7 +1356,7 @@ struct PragmaWarningHandler : public PragmaHandler {
while (Tok.is(tok::numeric_constant)) {
uint64_t Value;
if (!PP.parseSimpleIntegerLiteral(Tok, Value) || Value == 0 ||
- Value > std::numeric_limits<int>::max()) {
+ Value > INT_MAX) {
PP.Diag(Tok, diag::warn_pragma_warning_expected_number);
return;
}
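The handler parses MSVC-style warning pragmas; each number must now lie in (0, INT_MAX], e.g.:

#pragma warning(disable : 4996)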
diff --git a/clang/lib/Lex/Preprocessor.cpp b/clang/lib/Lex/Preprocessor.cpp
index 160e2b6ed884..94f1ce91f884 100644
--- a/clang/lib/Lex/Preprocessor.cpp
+++ b/clang/lib/Lex/Preprocessor.cpp
@@ -395,12 +395,10 @@ bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
assert(CompleteLine && CompleteColumn && "Starts from 1:1");
assert(!CodeCompletionFile && "Already set");
- using llvm::MemoryBuffer;
-
// Load the actual file's contents.
- bool Invalid = false;
- const MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File, &Invalid);
- if (Invalid)
+ Optional<llvm::MemoryBufferRef> Buffer =
+ SourceMgr.getMemoryBufferForFileOrNone(File);
+ if (!Buffer)
return true;
// Find the byte position of the truncation point.
@@ -969,8 +967,10 @@ void Preprocessor::Lex(Token &Result) {
LastTokenWasAt = Result.is(tok::at);
--LexLevel;
- if (LexLevel == 0 && !Result.getFlag(Token::IsReinjected)) {
- ++TokenCount;
+ if ((LexLevel == 0 || PreprocessToken) &&
+ !Result.getFlag(Token::IsReinjected)) {
+ if (LexLevel == 0)
+ ++TokenCount;
if (OnToken)
OnToken(Result);
}
@@ -1415,6 +1415,8 @@ ModuleLoader::~ModuleLoader() = default;
CommentHandler::~CommentHandler() = default;
+EmptylineHandler::~EmptylineHandler() = default;
+
CodeCompletionHandler::~CodeCompletionHandler() = default;
void Preprocessor::createPreprocessingRecord() {
diff --git a/clang/lib/Lex/ScratchBuffer.cpp b/clang/lib/Lex/ScratchBuffer.cpp
index 19ab93ec54b4..51435225a676 100644
--- a/clang/lib/Lex/ScratchBuffer.cpp
+++ b/clang/lib/Lex/ScratchBuffer.cpp
@@ -37,10 +37,10 @@ SourceLocation ScratchBuffer::getToken(const char *Buf, unsigned Len,
else {
// Clear out the source line cache if it's already been computed.
// FIXME: Allow this to be incrementally extended.
- auto *ContentCache = const_cast<SrcMgr::ContentCache *>(
- SourceMgr.getSLocEntry(SourceMgr.getFileID(BufferStartLoc))
- .getFile().getContentCache());
- ContentCache->SourceLineCache = nullptr;
+ SourceMgr.getSLocEntry(SourceMgr.getFileID(BufferStartLoc))
+ .getFile()
+ .getContentCache()
+ .SourceLineCache = SrcMgr::LineOffsetMapping();
}
// Prefix the token with a \n, so that it looks like it is the first thing on
diff --git a/clang/lib/Lex/TokenLexer.cpp b/clang/lib/Lex/TokenLexer.cpp
index da5681aaf478..97cb2cf0bb8c 100644
--- a/clang/lib/Lex/TokenLexer.cpp
+++ b/clang/lib/Lex/TokenLexer.cpp
@@ -148,12 +148,12 @@ bool TokenLexer::MaybeRemoveCommaBeforeVaArgs(
return false;
// GCC removes the comma in the expansion of " ... , ## __VA_ARGS__ " if
- // __VA_ARGS__ is empty, but not in strict C99 mode where there are no
- // named arguments, where it remains. In all other modes, including C99
- // with GNU extensions, it is removed regardless of named arguments.
+ // __VA_ARGS__ is empty, but not in strict mode where there are no
+ // named arguments, where it remains. With GNU extensions, it is removed
+ // regardless of named arguments.
// Microsoft also appears to support this extension, unofficially.
- if (PP.getLangOpts().C99 && !PP.getLangOpts().GNUMode
- && Macro->getNumParams() < 2)
+ if (!PP.getLangOpts().GNUMode && !PP.getLangOpts().MSVCCompat &&
+ Macro->getNumParams() < 2)
return false;
// Is a comma available to be removed?
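The extension being gated here is GCC's comma deletion before an empty __VA_ARGS__:

#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
LOG("done");    // expands to printf("done"); the trailing comma is removed
LOG("x=%d", 1); // expands to printf("x=%d", 1)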
diff --git a/clang/lib/Parse/ParseCXXInlineMethods.cpp b/clang/lib/Parse/ParseCXXInlineMethods.cpp
index d05332b5ac5a..b0335905b6f8 100644
--- a/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -108,7 +108,7 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(
// or if we are about to parse function member template then consume
// the tokens and store them for parsing at the end of the translation unit.
if (getLangOpts().DelayedTemplateParsing &&
- D.getFunctionDefinitionKind() == FDK_Definition &&
+ D.getFunctionDefinitionKind() == FunctionDefinitionKind::Definition &&
!D.getDeclSpec().hasConstexprSpecifier() &&
!(FnD && FnD->getAsFunction() &&
FnD->getAsFunction()->getReturnType()->getContainedAutoType()) &&
@@ -405,14 +405,21 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
ConsumeAnyToken();
} else if (HasUnparsed) {
assert(Param->hasInheritedDefaultArg());
- FunctionDecl *Old = cast<FunctionDecl>(LM.Method)->getPreviousDecl();
- ParmVarDecl *OldParam = Old->getParamDecl(I);
- assert (!OldParam->hasUnparsedDefaultArg());
- if (OldParam->hasUninstantiatedDefaultArg())
- Param->setUninstantiatedDefaultArg(
- OldParam->getUninstantiatedDefaultArg());
+ const FunctionDecl *Old;
+ if (const auto *FunTmpl = dyn_cast<FunctionTemplateDecl>(LM.Method))
+ Old =
+ cast<FunctionDecl>(FunTmpl->getTemplatedDecl())->getPreviousDecl();
else
- Param->setDefaultArg(OldParam->getInit());
+ Old = cast<FunctionDecl>(LM.Method)->getPreviousDecl();
+ if (Old) {
+ ParmVarDecl *OldParam = const_cast<ParmVarDecl*>(Old->getParamDecl(I));
+ assert(!OldParam->hasUnparsedDefaultArg());
+ if (OldParam->hasUninstantiatedDefaultArg())
+ Param->setUninstantiatedDefaultArg(
+ OldParam->getUninstantiatedDefaultArg());
+ else
+ Param->setDefaultArg(OldParam->getInit());
+ }
}
}
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index c87d240a8206..571164139630 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -23,6 +23,7 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
@@ -452,6 +453,10 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
return;
+ } else if (AttrKind == ParsedAttr::AT_SwiftNewType) {
+ ParseSwiftNewTypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ScopeLoc, Syntax);
+ return;
} else if (AttrKind == ParsedAttr::AT_TypeTagForDatatype) {
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
@@ -506,6 +511,10 @@ unsigned Parser::ParseClangAttributeArgs(
ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
break;
+ case ParsedAttr::AT_SwiftNewType:
+ ParseSwiftNewTypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ScopeLoc, Syntax);
+ break;
case ParsedAttr::AT_TypeTagForDatatype:
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
@@ -824,6 +833,7 @@ void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
switch (Tok.getKind()) {
case tok::kw__Nonnull:
case tok::kw__Nullable:
+ case tok::kw__Nullable_result:
case tok::kw__Null_unspecified: {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
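A parse-level sketch of the new specifier (blocks syntax, -fblocks; it targets completion handlers whose result may be nil only alongside an error):

void fetch(void (^completion)(char *_Nullable_result value, int error));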
@@ -1109,7 +1119,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
// Also reject wide string literals.
if (StringLiteral *MessageStringLiteral =
cast_or_null<StringLiteral>(MessageExpr.get())) {
- if (MessageStringLiteral->getCharByteWidth() != 1) {
+ if (!MessageStringLiteral->isAscii()) {
Diag(MessageStringLiteral->getSourceRange().getBegin(),
diag::err_expected_string_literal)
<< /*Source='availability attribute'*/ 2;
@@ -1409,6 +1419,49 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
Syntax);
}
+
+void Parser::ParseSwiftNewTypeAttribute(
+ IdentifierInfo &AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ // Opening '('
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ if (Tok.is(tok::r_paren)) {
+ Diag(Tok.getLocation(), diag::err_argument_required_after_attribute);
+ T.consumeClose();
+ return;
+ }
+ if (Tok.isNot(tok::kw_struct) && Tok.isNot(tok::kw_enum)) {
+ Diag(Tok, diag::warn_attribute_type_not_supported)
+ << &AttrName << Tok.getIdentifierInfo();
+ if (!isTokenSpecial())
+ ConsumeToken();
+ T.consumeClose();
+ return;
+ }
+
+ auto *SwiftType = IdentifierLoc::create(Actions.Context, Tok.getLocation(),
+ Tok.getIdentifierInfo());
+ ConsumeToken();
+
+ // Closing ')'
+ if (T.consumeClose())
+ return;
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
+
+ ArgsUnion Args[] = {SwiftType};
+ Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, T.getCloseLocation()),
+ ScopeName, ScopeLoc, Args, llvm::array_lengthof(Args), Syntax);
+}
+
+
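The grammar accepted above is exactly one of 'struct' or 'enum' in parentheses, e.g. on a typedef:

typedef double Celsius __attribute__((swift_newtype(struct)));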
void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
@@ -1541,7 +1594,8 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
if (!AL.isCXX11Attribute() && !AL.isC2xAttribute())
continue;
if (AL.getKind() == ParsedAttr::UnknownAttribute)
- Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored) << AL;
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL << AL.getRange();
else {
Diag(AL.getLoc(), DiagID) << AL;
AL.setInvalid();
@@ -1721,12 +1775,11 @@ bool Parser::MightBeDeclarator(DeclaratorContext Context) {
return getLangOpts().CPlusPlus;
case tok::l_square: // Might be an attribute on an unnamed bit-field.
- return Context == DeclaratorContext::MemberContext &&
- getLangOpts().CPlusPlus11 && NextToken().is(tok::l_square);
+ return Context == DeclaratorContext::Member && getLangOpts().CPlusPlus11 &&
+ NextToken().is(tok::l_square);
case tok::colon: // Might be a typo for '::' or an unnamed bit-field.
- return Context == DeclaratorContext::MemberContext ||
- getLangOpts().CPlusPlus;
+ return Context == DeclaratorContext::Member || getLangOpts().CPlusPlus;
case tok::identifier:
switch (NextToken().getKind()) {
@@ -1752,9 +1805,8 @@ bool Parser::MightBeDeclarator(DeclaratorContext Context) {
// At namespace scope, 'identifier:' is probably a typo for 'identifier::'
// and in block scope it's probably a label. Inside a class definition,
// this is a bit-field.
- return Context == DeclaratorContext::MemberContext ||
- (getLangOpts().CPlusPlus &&
- Context == DeclaratorContext::FileContext);
+ return Context == DeclaratorContext::Member ||
+ (getLangOpts().CPlusPlus && Context == DeclaratorContext::File);
case tok::identifier: // Possible virt-specifier.
return getLangOpts().CPlusPlus11 && isCXX11VirtSpecifier(NextToken());
@@ -1912,7 +1964,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Function definitions are only allowed at file scope and in C++ classes.
// The C++ inline method definition case is handled elsewhere, so we only
// need to handle the file scope definition case.
- if (Context == DeclaratorContext::FileContext) {
+ if (Context == DeclaratorContext::File) {
if (isStartOfFunctionDefinition(D)) {
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(Tok, diag::err_function_declared_typedef);
@@ -1991,7 +2043,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
if (FirstDecl)
DeclsInGroup.push_back(FirstDecl);
- bool ExpectSemi = Context != DeclaratorContext::ForContext;
+ bool ExpectSemi = Context != DeclaratorContext::ForInit;
// If we don't have a comma, it is either the end of the list (a ';') or an
// error, bail out.
@@ -2042,10 +2094,10 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
if (DeclEnd)
*DeclEnd = Tok.getLocation();
- if (ExpectSemi &&
- ExpectAndConsumeSemi(Context == DeclaratorContext::FileContext
- ? diag::err_invalid_token_after_toplevel_declarator
- : diag::err_expected_semi_declaration)) {
+ if (ExpectSemi && ExpectAndConsumeSemi(
+ Context == DeclaratorContext::File
+ ? diag::err_invalid_token_after_toplevel_declarator
+ : diag::err_expected_semi_declaration)) {
// Okay, there was no semicolon and one was expected. If we see a
// declaration specifier, just assume it was missing and continue parsing.
// Otherwise things are very confused and we skip to recover.
@@ -2141,8 +2193,24 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
}
};
- // Inform the current actions module that we just parsed this declarator.
+ enum class InitKind { Uninitialized, Equal, CXXDirect, CXXBraced };
+ InitKind TheInitKind;
+ // If a '==' or '+=' is found, suggest a fixit to '='.
+ if (isTokenEqualOrEqualTypo())
+ TheInitKind = InitKind::Equal;
+ else if (Tok.is(tok::l_paren))
+ TheInitKind = InitKind::CXXDirect;
+ else if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace) &&
+ (!CurParsedObjCImpl || !D.isFunctionDeclarator()))
+ TheInitKind = InitKind::CXXBraced;
+ else
+ TheInitKind = InitKind::Uninitialized;
+ if (TheInitKind != InitKind::Uninitialized)
+ D.setHasInitializer();
+
+ // Inform Sema that we just parsed this declarator.
Decl *ThisDecl = nullptr;
+ Decl *OuterDecl = nullptr;
switch (TemplateInfo.Kind) {
case ParsedTemplateInfo::NonTemplate:
ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
@@ -2153,10 +2221,12 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
ThisDecl = Actions.ActOnTemplateDeclarator(getCurScope(),
*TemplateInfo.TemplateParams,
D);
- if (VarTemplateDecl *VT = dyn_cast_or_null<VarTemplateDecl>(ThisDecl))
+ if (VarTemplateDecl *VT = dyn_cast_or_null<VarTemplateDecl>(ThisDecl)) {
// Re-direct this decl to refer to the templated decl so that we can
// initialize it.
ThisDecl = VT->getTemplatedDecl();
+ OuterDecl = VT;
+ }
break;
}
case ParsedTemplateInfo::ExplicitInstantiation: {
@@ -2200,9 +2270,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
}
}
+ switch (TheInitKind) {
// Parse declarator '=' initializer.
- // If a '==' or '+=' is found, suggest a fixit to '='.
- if (isTokenEqualOrEqualTypo()) {
+ case InitKind::Equal: {
SourceLocation EqualLoc = ConsumeToken();
if (Tok.is(tok::kw_delete)) {
@@ -2248,8 +2318,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
if (Init.isInvalid()) {
SmallVector<tok::TokenKind, 2> StopTokens;
StopTokens.push_back(tok::comma);
- if (D.getContext() == DeclaratorContext::ForContext ||
- D.getContext() == DeclaratorContext::InitStmtContext)
+ if (D.getContext() == DeclaratorContext::ForInit ||
+ D.getContext() == DeclaratorContext::SelectionInit)
StopTokens.push_back(tok::r_paren);
SkipUntil(StopTokens, StopAtSemi | StopBeforeMatch);
Actions.ActOnInitializerError(ThisDecl);
@@ -2257,7 +2327,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
Actions.AddInitializerToDecl(ThisDecl, Init.get(),
/*DirectInit=*/false);
}
- } else if (Tok.is(tok::l_paren)) {
+ break;
+ }
+ case InitKind::CXXDirect: {
// Parse C++ direct initializer: '(' expression-list ')'
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -2311,8 +2383,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
Actions.AddInitializerToDecl(ThisDecl, Initializer.get(),
/*DirectInit=*/true);
}
- } else if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace) &&
- (!CurParsedObjCImpl || !D.isFunctionDeclarator())) {
+ break;
+ }
+ case InitKind::CXXBraced: {
// Parse C++0x braced-init-list.
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
@@ -2327,14 +2400,16 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
Actions.ActOnInitializerError(ThisDecl);
} else
Actions.AddInitializerToDecl(ThisDecl, Init.get(), /*DirectInit=*/true);
-
- } else {
+ break;
+ }
+ case InitKind::Uninitialized: {
Actions.ActOnUninitializedDecl(ThisDecl);
+ break;
+ }
}
Actions.FinalizeDeclaration(ThisDecl);
-
- return ThisDecl;
+ return OuterDecl ? OuterDecl : ThisDecl;
}
/// ParseSpecifierQualifierList
@@ -2385,7 +2460,7 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
// Issue diagnostic and remove constexpr specifier if present.
if (DS.hasConstexprSpecifier() && DSC != DeclSpecContext::DSC_condition) {
Diag(DS.getConstexprSpecLoc(), diag::err_typename_invalid_constexpr)
- << DS.getConstexprSpecifier();
+ << static_cast<int>(DS.getConstexprSpecifier());
DS.ClearConstexprSpec();
}
}
@@ -2656,20 +2731,20 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
/// DeclaratorContext enumerator values.
Parser::DeclSpecContext
Parser::getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context) {
- if (Context == DeclaratorContext::MemberContext)
+ if (Context == DeclaratorContext::Member)
return DeclSpecContext::DSC_class;
- if (Context == DeclaratorContext::FileContext)
+ if (Context == DeclaratorContext::File)
return DeclSpecContext::DSC_top_level;
- if (Context == DeclaratorContext::TemplateParamContext)
+ if (Context == DeclaratorContext::TemplateParam)
return DeclSpecContext::DSC_template_param;
- if (Context == DeclaratorContext::TemplateArgContext ||
- Context == DeclaratorContext::TemplateTypeArgContext)
+ if (Context == DeclaratorContext::TemplateArg ||
+ Context == DeclaratorContext::TemplateTypeArg)
return DeclSpecContext::DSC_template_type_arg;
- if (Context == DeclaratorContext::TrailingReturnContext ||
- Context == DeclaratorContext::TrailingReturnVarContext)
+ if (Context == DeclaratorContext::TrailingReturn ||
+ Context == DeclaratorContext::TrailingReturnVar)
return DeclSpecContext::DSC_trailing;
- if (Context == DeclaratorContext::AliasDeclContext ||
- Context == DeclaratorContext::AliasTemplateContext)
+ if (Context == DeclaratorContext::AliasDecl ||
+ Context == DeclaratorContext::AliasTemplate)
return DeclSpecContext::DSC_alias_declaration;
return DeclSpecContext::DSC_normal;
}
@@ -2837,7 +2912,7 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
case Sema::NC_Unknown:
case Sema::NC_NonType:
case Sema::NC_DependentNonType:
- case Sema::NC_ContextIndependentExpr:
+ case Sema::NC_OverloadSet:
case Sema::NC_VarTemplate:
case Sema::NC_FunctionTemplate:
case Sema::NC_Concept:
@@ -3462,6 +3537,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Nullability type specifiers.
case tok::kw__Nonnull:
case tok::kw__Nullable:
+ case tok::kw__Nullable_result:
case tok::kw__Null_unspecified:
ParseNullabilityTypeSpecifiers(DS.getAttributes());
continue;
@@ -3554,12 +3630,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw_virtual:
// C++ for OpenCL does not allow virtual function qualifier, to avoid
// function pointers restricted in OpenCL v2.0 s6.9.a.
- if (getLangOpts().OpenCLCPlusPlus) {
+ if (getLangOpts().OpenCLCPlusPlus &&
+ !getActions().getOpenCLOptions().isEnabled(
+ "__cl_clang_function_pointers")) {
DiagID = diag::err_openclcxx_virtual_function;
PrevSpec = Tok.getIdentifierInfo()->getNameStart();
isInvalid = true;
- }
- else {
+ } else {
isInvalid = DS.setFunctionSpecVirtual(Loc, PrevSpec, DiagID);
}
break;
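With the extra check, 'virtual' is accepted in C++ for OpenCL once the clang extension unlocking function pointers is enabled (sketch):

#pragma OPENCL EXTENSION __cl_clang_function_pointers : enable
class Shape { virtual int sides(); };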
@@ -3626,38 +3703,41 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// constexpr, consteval, constinit specifiers
case tok::kw_constexpr:
- isInvalid = DS.SetConstexprSpec(CSK_constexpr, Loc, PrevSpec, DiagID);
+ isInvalid = DS.SetConstexprSpec(ConstexprSpecKind::Constexpr, Loc,
+ PrevSpec, DiagID);
break;
case tok::kw_consteval:
- isInvalid = DS.SetConstexprSpec(CSK_consteval, Loc, PrevSpec, DiagID);
+ isInvalid = DS.SetConstexprSpec(ConstexprSpecKind::Consteval, Loc,
+ PrevSpec, DiagID);
break;
case tok::kw_constinit:
- isInvalid = DS.SetConstexprSpec(CSK_constinit, Loc, PrevSpec, DiagID);
+ isInvalid = DS.SetConstexprSpec(ConstexprSpecKind::Constinit, Loc,
+ PrevSpec, DiagID);
break;
// type-specifier
case tok::kw_short:
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec,
+ isInvalid = DS.SetTypeSpecWidth(TypeSpecifierWidth::Short, Loc, PrevSpec,
DiagID, Policy);
break;
case tok::kw_long:
- if (DS.getTypeSpecWidth() != DeclSpec::TSW_long)
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec,
+ if (DS.getTypeSpecWidth() != TypeSpecifierWidth::Long)
+ isInvalid = DS.SetTypeSpecWidth(TypeSpecifierWidth::Long, Loc, PrevSpec,
DiagID, Policy);
else
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
- DiagID, Policy);
+ isInvalid = DS.SetTypeSpecWidth(TypeSpecifierWidth::LongLong, Loc,
+ PrevSpec, DiagID, Policy);
break;
case tok::kw___int64:
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
- DiagID, Policy);
+ isInvalid = DS.SetTypeSpecWidth(TypeSpecifierWidth::LongLong, Loc,
+ PrevSpec, DiagID, Policy);
break;
case tok::kw_signed:
- isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec,
- DiagID);
+ isInvalid =
+ DS.SetTypeSpecSign(TypeSpecifierSign::Signed, Loc, PrevSpec, DiagID);
break;
case tok::kw_unsigned:
- isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec,
+ isInvalid = DS.SetTypeSpecSign(TypeSpecifierSign::Unsigned, Loc, PrevSpec,
DiagID);
break;
case tok::kw__Complex:
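These hunks migrate the DeclSpec setters from unscoped constants (CSK_*, TSW_*, TSS_*) to scoped enums (ConstexprSpecKind, TypeSpecifierWidth, TypeSpecifierSign). A minimal standalone sketch, not Clang's actual types, of the language rule that also forces the static_cast<int> in the diagnostic at the start of this diff: scoped enumerators do not convert implicitly to int.

    #include <iostream>

    enum OldKind { CSK_unspecified, CSK_constexpr };  // unscoped: converts to int
    enum class NewKind { Unspecified, Constexpr };    // scoped: no implicit conversion

    int main() {
      std::cout << CSK_constexpr << '\n';                         // OK, prints 1
      // std::cout << NewKind::Constexpr << '\n';                 // error: no operator<< match
      std::cout << static_cast<int>(NewKind::Constexpr) << '\n';  // cast required, as in the diff
    }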
@@ -4061,8 +4141,13 @@ void Parser::ParseStructDeclaration(
DeclaratorInfo.D.setCommaLoc(CommaLoc);
// Attributes are only allowed here on successive declarators.
- if (!FirstDeclarator)
+ if (!FirstDeclarator) {
+ // However, this does not apply to [[]] attributes (which could show up
+ // before or after the __attribute__ attributes).
+ DiagnoseAndSkipCXX11Attributes();
MaybeParseGNUAttributes(DeclaratorInfo.D);
+ DiagnoseAndSkipCXX11Attributes();
+ }
/// struct-declarator: declarator
/// struct-declarator: declarator[opt] ':' constant-expression
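A hypothetical declaration exercising the new DiagnoseAndSkipCXX11Attributes() calls: between successive struct declarators only the GNU __attribute__ spelling is grammatically allowed, so a [[...]] attribute before or after it is now diagnosed and skipped instead of mis-parsed.

    struct S {
      int a __attribute__((aligned(4))),  // GNU attribute on a later declarator: OK
          [[deprecated]] b;               // C++11 attribute here: diagnosed, then skipped
    };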
@@ -4100,7 +4185,7 @@ void Parser::ParseStructDeclaration(
/// struct-contents:
/// struct-declaration-list
/// [EXT] empty
-/// [GNU] "struct-declaration-list" without terminatoring ';'
+/// [GNU] "struct-declaration-list" without terminating ';'
/// struct-declaration-list:
/// struct-declaration
/// struct-declaration-list struct-declaration
@@ -4415,7 +4500,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// declares 'enum E : int; E *p;' not 'enum E : int*; E p;'.
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS, AS, DeclSpecContext::DSC_type_specifier);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
BaseType = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
BaseRange = SourceRange(ColonLoc, DeclaratorInfo.getSourceRange().getEnd());
@@ -4940,6 +5025,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw__Nonnull:
case tok::kw__Nullable:
+ case tok::kw__Nullable_result:
case tok::kw__Null_unspecified:
case tok::kw___kindof:
@@ -5167,6 +5253,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw__Nonnull:
case tok::kw__Nullable:
+ case tok::kw__Nullable_result:
case tok::kw__Null_unspecified:
case tok::kw___kindof:
@@ -5442,6 +5529,7 @@ void Parser::ParseTypeQualifierListOpt(
// Nullability type specifiers.
case tok::kw__Nonnull:
case tok::kw__Nullable:
+ case tok::kw__Nullable_result:
case tok::kw__Null_unspecified:
ParseNullabilityTypeSpecifiers(DS.getAttributes());
continue;
@@ -5515,9 +5603,8 @@ static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
// (The same thing can in theory happen after a trailing-return-type, but
// since those are a C++11 feature, there is no rejects-valid issue there.)
if (Kind == tok::ampamp)
- return Lang.CPlusPlus11 ||
- (TheContext != DeclaratorContext::ConversionIdContext &&
- TheContext != DeclaratorContext::CXXNewContext);
+ return Lang.CPlusPlus11 || (TheContext != DeclaratorContext::ConversionId &&
+ TheContext != DeclaratorContext::CXXNew);
return false;
}
@@ -5571,9 +5658,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
(Tok.is(tok::identifier) &&
(NextToken().is(tok::coloncolon) || NextToken().is(tok::less))) ||
Tok.is(tok::annot_cxxscope))) {
- bool EnteringContext =
- D.getContext() == DeclaratorContext::FileContext ||
- D.getContext() == DeclaratorContext::MemberContext;
+ bool EnteringContext = D.getContext() == DeclaratorContext::File ||
+ D.getContext() == DeclaratorContext::Member;
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
/*ObjectHadErrors=*/false, EnteringContext);
@@ -5591,6 +5677,11 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
return;
}
+ if (SS.isValid()) {
+ checkCompoundToken(SS.getEndLoc(), tok::coloncolon,
+ CompoundToken::MemberPtr);
+ }
+
SourceLocation StarLoc = ConsumeToken();
D.SetRangeEnd(StarLoc);
DeclSpec DS(AttrFactory);
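The checkCompoundToken call added above feeds Clang's compound-token-split checking: '::' immediately followed by '*' spells a member-pointer declarator, and a pair assembled from different macro expansions is suspicious. A hedged sketch (the SCOPE macro is hypothetical):

    struct A { int m; };

    int A::*ok = &A::m;      // '::' and '*' together form the member pointer

    #define SCOPE A::
    int SCOPE *odd = &A::m;  // '::' and '*' come from different expansions; this is
                             // the kind of split the new check is meant to flag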
@@ -5640,7 +5731,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// GNU attributes are not allowed here in a new-type-id, but Declspec and
// C++11 attributes are allowed.
unsigned Reqs = AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed |
- ((D.getContext() != DeclaratorContext::CXXNewContext)
+ ((D.getContext() != DeclaratorContext::CXXNew)
? AR_GNUAttributesParsed
: AR_GNUAttributesParsedAndRejected);
ParseTypeQualifierListOpt(DS, Reqs, true, !D.mayOmitIdentifier());
@@ -5790,15 +5881,14 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// this context it is a bitfield. Also in range-based for statement colon
// may delimit for-range-declaration.
ColonProtectionRAIIObject X(
- *this, D.getContext() == DeclaratorContext::MemberContext ||
- (D.getContext() == DeclaratorContext::ForContext &&
+ *this, D.getContext() == DeclaratorContext::Member ||
+ (D.getContext() == DeclaratorContext::ForInit &&
getLangOpts().CPlusPlus11));
// ParseDeclaratorInternal might already have parsed the scope.
if (D.getCXXScopeSpec().isEmpty()) {
- bool EnteringContext =
- D.getContext() == DeclaratorContext::FileContext ||
- D.getContext() == DeclaratorContext::MemberContext;
+ bool EnteringContext = D.getContext() == DeclaratorContext::File ||
+ D.getContext() == DeclaratorContext::Member;
ParseOptionalCXXScopeSpecifier(
D.getCXXScopeSpec(), /*ObjectType=*/nullptr,
/*ObjectHadErrors=*/false, EnteringContext);
@@ -5828,11 +5918,10 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// been expanded or contains auto; otherwise, it is parsed as part of the
// parameter-declaration-clause.
if (Tok.is(tok::ellipsis) && D.getCXXScopeSpec().isEmpty() &&
- !((D.getContext() == DeclaratorContext::PrototypeContext ||
- D.getContext() == DeclaratorContext::LambdaExprParameterContext ||
- D.getContext() == DeclaratorContext::BlockLiteralContext) &&
- NextToken().is(tok::r_paren) &&
- !D.hasGroupingParens() &&
+ !((D.getContext() == DeclaratorContext::Prototype ||
+ D.getContext() == DeclaratorContext::LambdaExprParameter ||
+ D.getContext() == DeclaratorContext::BlockLiteral) &&
+ NextToken().is(tok::r_paren) && !D.hasGroupingParens() &&
!Actions.containsUnexpandedParameterPacks(D) &&
D.getDeclSpec().getTypeSpecType() != TST_auto)) {
SourceLocation EllipsisLoc = ConsumeToken();
@@ -5861,16 +5950,13 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
AllowConstructorName = false;
AllowDeductionGuide = false;
} else if (D.getCXXScopeSpec().isSet()) {
- AllowConstructorName =
- (D.getContext() == DeclaratorContext::FileContext ||
- D.getContext() == DeclaratorContext::MemberContext);
+ AllowConstructorName = (D.getContext() == DeclaratorContext::File ||
+ D.getContext() == DeclaratorContext::Member);
AllowDeductionGuide = false;
} else {
- AllowConstructorName =
- (D.getContext() == DeclaratorContext::MemberContext);
- AllowDeductionGuide =
- (D.getContext() == DeclaratorContext::FileContext ||
- D.getContext() == DeclaratorContext::MemberContext);
+ AllowConstructorName = (D.getContext() == DeclaratorContext::Member);
+ AllowDeductionGuide = (D.getContext() == DeclaratorContext::File ||
+ D.getContext() == DeclaratorContext::Member);
}
bool HadScope = D.getCXXScopeSpec().isValid();
@@ -5926,16 +6012,16 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// An identifier within parens is unlikely to be intended to be anything
// other than a name being "declared".
DiagnoseIdentifier = true;
- else if (D.getContext() == DeclaratorContext::TemplateArgContext)
+ else if (D.getContext() == DeclaratorContext::TemplateArg)
// T<int N> is an accidental identifier; T<int N indicates a missing '>'.
DiagnoseIdentifier =
NextToken().isOneOf(tok::comma, tok::greater, tok::greatergreater);
- else if (D.getContext() == DeclaratorContext::AliasDeclContext ||
- D.getContext() == DeclaratorContext::AliasTemplateContext)
+ else if (D.getContext() == DeclaratorContext::AliasDecl ||
+ D.getContext() == DeclaratorContext::AliasTemplate)
// The most likely error is that the ';' was forgotten.
DiagnoseIdentifier = NextToken().isOneOf(tok::comma, tok::semi);
- else if ((D.getContext() == DeclaratorContext::TrailingReturnContext ||
- D.getContext() == DeclaratorContext::TrailingReturnVarContext) &&
+ else if ((D.getContext() == DeclaratorContext::TrailingReturn ||
+ D.getContext() == DeclaratorContext::TrailingReturnVar) &&
!isCXX11VirtSpecifier(Tok))
DiagnoseIdentifier = NextToken().isOneOf(
tok::comma, tok::semi, tok::equal, tok::l_brace, tok::kw_try);
@@ -5994,7 +6080,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
LLVM_BUILTIN_TRAP;
if (Tok.is(tok::l_square))
return ParseMisplacedBracketDeclarator(D);
- if (D.getContext() == DeclaratorContext::MemberContext) {
+ if (D.getContext() == DeclaratorContext::Member) {
// Objective-C++: Detect C++ keywords and try to prevent further errors by
// treating these keyword as valid member names.
if (getLangOpts().ObjC && getLangOpts().CPlusPlus &&
@@ -6285,13 +6371,14 @@ void Parser::InitCXXThisScopeForDeclaratorIfRelevant(
// and the end of the function-definition, member-declarator, or
// declarator.
// FIXME: currently, "static" case isn't handled correctly.
- bool IsCXX11MemberFunction = getLangOpts().CPlusPlus11 &&
- D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
- (D.getContext() == DeclaratorContext::MemberContext
- ? !D.getDeclSpec().isFriendSpecified()
- : D.getContext() == DeclaratorContext::FileContext &&
- D.getCXXScopeSpec().isValid() &&
- Actions.CurContext->isRecord());
+ bool IsCXX11MemberFunction =
+ getLangOpts().CPlusPlus11 &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ (D.getContext() == DeclaratorContext::Member
+ ? !D.getDeclSpec().isFriendSpecified()
+ : D.getContext() == DeclaratorContext::File &&
+ D.getCXXScopeSpec().isValid() &&
+ Actions.CurContext->isRecord());
if (!IsCXX11MemberFunction)
return;
@@ -6365,6 +6452,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
CachedTokens *ExceptionSpecTokens = nullptr;
ParsedAttributesWithRange FnAttrs(AttrFactory);
TypeResult TrailingReturnType;
+ SourceLocation TrailingReturnTypeLoc;
/* LocalEndLoc is the end location for the local FunctionTypeLoc.
EndLoc is the end location for the function declarator.
@@ -6429,6 +6517,10 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
InitCXXThisScopeForDeclaratorIfRelevant(D, DS, ThisScope);
// Parse exception-specification[opt].
+ // FIXME: Per [class.mem]p6, all exception-specifications at class scope
+ // should be delayed, including those for non-members (e.g., friend
+ // declarations). But only applying this to member declarations is
+ // consistent with what other implementations do.
bool Delayed = D.isFirstDeclarationOfMember() &&
D.isFunctionDeclaratorAFunctionDeclaration();
if (Delayed && Actions.isLibstdcxxEagerExceptionSpecHack(D) &&
@@ -6471,6 +6563,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
SourceRange Range;
TrailingReturnType =
ParseTrailingReturnType(Range, D.mayBeFollowedByCXXDirectInit());
+ TrailingReturnTypeLoc = Range.getBegin();
EndLoc = Range.getEnd();
}
} else if (standardAttributesAllowed()) {
@@ -6503,7 +6596,8 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
DynamicExceptionRanges.data(), DynamicExceptions.size(),
NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
ExceptionSpecTokens, DeclsInPrototype, StartLoc,
- LocalEndLoc, D, TrailingReturnType, &DS),
+ LocalEndLoc, D, TrailingReturnType, TrailingReturnTypeLoc,
+ &DS),
std::move(FnAttrs), EndLoc);
}
@@ -6686,11 +6780,11 @@ void Parser::ParseParameterDeclarationClause(
// "LambdaExprParameterContext", because we must accept either
// 'declarator' or 'abstract-declarator' here.
Declarator ParmDeclarator(
- DS, DeclaratorCtx == DeclaratorContext::RequiresExprContext
- ? DeclaratorContext::RequiresExprContext
- : DeclaratorCtx == DeclaratorContext::LambdaExprContext
- ? DeclaratorContext::LambdaExprParameterContext
- : DeclaratorContext::PrototypeContext);
+ DS, DeclaratorCtx == DeclaratorContext::RequiresExpr
+ ? DeclaratorContext::RequiresExpr
+ : DeclaratorCtx == DeclaratorContext::LambdaExpr
+ ? DeclaratorContext::LambdaExprParameter
+ : DeclaratorContext::Prototype);
ParseDeclarator(ParmDeclarator);
// Parse GNU attributes, if present.
@@ -6769,7 +6863,7 @@ void Parser::ParseParameterDeclarationClause(
SourceLocation EqualLoc = Tok.getLocation();
// Parse the default argument
- if (DeclaratorCtx == DeclaratorContext::MemberContext) {
+ if (DeclaratorCtx == DeclaratorContext::Member) {
// If we're inside a class definition, cache the tokens
// corresponding to the default argument. We'll actually parse
// them when we see the end of the class definition.
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index ddcbb5615fee..88ebb59f9a60 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -624,8 +624,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
// or the simple-template-id's template-name in the last component of the
// nested-name-specifier, the name is [...] considered to name the
// constructor.
- if (getLangOpts().CPlusPlus11 &&
- Context == DeclaratorContext::MemberContext &&
+ if (getLangOpts().CPlusPlus11 && Context == DeclaratorContext::Member &&
Tok.is(tok::identifier) &&
(NextToken().is(tok::semi) || NextToken().is(tok::comma) ||
NextToken().is(tok::ellipsis)) &&
@@ -834,11 +833,11 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
<< FixItHint::CreateRemoval(SourceRange(D.EllipsisLoc));
Decl *DeclFromDeclSpec = nullptr;
- TypeResult TypeAlias = ParseTypeName(
- nullptr,
- TemplateInfo.Kind ? DeclaratorContext::AliasTemplateContext
- : DeclaratorContext::AliasDeclContext,
- AS, &DeclFromDeclSpec, &Attrs);
+ TypeResult TypeAlias =
+ ParseTypeName(nullptr,
+ TemplateInfo.Kind ? DeclaratorContext::AliasTemplate
+ : DeclaratorContext::AliasDecl,
+ AS, &DeclFromDeclSpec, &Attrs);
if (OwnedType)
*OwnedType = DeclFromDeclSpec;
@@ -1046,8 +1045,16 @@ void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
SourceLocation StartLoc,
SourceLocation EndLoc) {
// make sure we have a token we can turn into an annotation token
- if (PP.isBacktrackEnabled())
+ if (PP.isBacktrackEnabled()) {
PP.RevertCachedTokens(1);
+ if (DS.getTypeSpecType() == TST_error) {
+ // We encountered an error in parsing 'decltype(...)', so let's annotate all
+ // the tokens in the backtracking cache - those we likely had to skip over
+ // to get to a token that allows us to resume parsing, such as a
+ // semicolon.
+ EndLoc = PP.getLastCachedTokenLocation();
+ }
+ }
else
PP.EnterToken(Tok, /*IsReinject*/true);
@@ -1140,7 +1147,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
EndLocation = ParseDecltypeSpecifier(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
@@ -1232,7 +1239,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
@@ -2186,17 +2193,20 @@ void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
auto LateMethod = new LateParsedMethodDeclaration(this, ThisDecl);
getCurrentClass().LateParsedDeclarations.push_back(LateMethod);
- // Stash the exception-specification tokens in the late-pased method.
- LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens;
- FTI.ExceptionSpecTokens = nullptr;
-
- // Push tokens for each parameter. Those that do not have
- // defaults will be NULL.
+ // Push tokens for each parameter. Those that do not have defaults will be
+ // NULL. We need to track all the parameters so that we can push them into
+ // scope for later parameters and perhaps for the exception specification.
LateMethod->DefaultArgs.reserve(FTI.NumParams);
for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx)
LateMethod->DefaultArgs.push_back(LateParsedDefaultArgument(
FTI.Params[ParamIdx].Param,
std::move(FTI.Params[ParamIdx].DefaultArgTokens)));
+
+ // Stash the exception-specification tokens in the late-parsed method.
+ if (FTI.getExceptionSpecType() == EST_Unparsed) {
+ LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens;
+ FTI.ExceptionSpecTokens = nullptr;
+ }
}
}
@@ -2302,10 +2312,15 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize,
LateParsedAttrList &LateParsedAttrs) {
// member-declarator:
- // declarator pure-specifier[opt]
+ // declarator virt-specifier-seq[opt] pure-specifier[opt]
// declarator requires-clause
// declarator brace-or-equal-initializer[opt]
- // identifier[opt] ':' constant-expression
+ // identifier attribute-specifier-seq[opt] ':' constant-expression
+ // brace-or-equal-initializer[opt]
+ // ':' constant-expression
+ //
+ // NOTE: the latter two productions are a proposed bugfix rather than the
+ // current grammar rules as of C++20.
if (Tok.isNot(tok::colon))
ParseDeclarator(DeclaratorInfo);
else
@@ -2339,7 +2354,11 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
}
// If attributes exist after the declarator, but before an '{', parse them.
+ // However, this does not apply to [[]] attributes (which could show up
+ // before or after the __attribute__ attributes).
+ DiagnoseAndSkipCXX11Attributes();
MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+ DiagnoseAndSkipCXX11Attributes();
// For compatibility with code written to older Clang, also accept a
// virt-specifier *after* the GNU attributes.
@@ -2412,7 +2431,7 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
const char *Name = (RefQualifierIsLValueRef ? "& " : "&& ");
FixItHint Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
Function.RefQualifierIsLValueRef = RefQualifierIsLValueRef;
- Function.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
+ Function.RefQualifierLoc = RefQualifierLoc;
Diag(RefQualifierLoc, diag::err_declspec_after_virtspec)
<< (RefQualifierIsLValueRef ? "&" : "&&")
@@ -2554,7 +2573,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation DeclEnd;
return DeclGroupPtrTy::make(
DeclGroupRef(ParseTemplateDeclarationOrSpecialization(
- DeclaratorContext::MemberContext, DeclEnd, AccessAttrs, AS)));
+ DeclaratorContext::Member, DeclEnd, AccessAttrs, AS)));
}
// Handle: member-declaration ::= '__extension__' member-declaration
@@ -2597,7 +2616,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
SourceLocation DeclEnd;
// Otherwise, it must be a using-declaration or an alias-declaration.
- return ParseUsingDeclaration(DeclaratorContext::MemberContext, TemplateInfo,
+ return ParseUsingDeclaration(DeclaratorContext::Member, TemplateInfo,
UsingLoc, DeclEnd, AS);
}
@@ -2645,7 +2664,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- ParsingDeclarator DeclaratorInfo(*this, DS, DeclaratorContext::MemberContext);
+ ParsingDeclarator DeclaratorInfo(*this, DS, DeclaratorContext::Member);
if (TemplateInfo.TemplateParams)
DeclaratorInfo.setTemplateParameterLists(TemplateParams);
VirtSpecifiers VS;
@@ -2696,23 +2715,23 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (getLangOpts().MicrosoftExt && DeclaratorInfo.isDeclarationOfFunction())
TryConsumePureSpecifier(/*AllowDefinition*/ true);
- FunctionDefinitionKind DefinitionKind = FDK_Declaration;
+ FunctionDefinitionKind DefinitionKind = FunctionDefinitionKind::Declaration;
// function-definition:
//
// In C++11, a non-function declarator followed by an open brace is a
// braced-init-list for an in-class member initialization, not an
// erroneous function definition.
if (Tok.is(tok::l_brace) && !getLangOpts().CPlusPlus11) {
- DefinitionKind = FDK_Definition;
+ DefinitionKind = FunctionDefinitionKind::Definition;
} else if (DeclaratorInfo.isFunctionDeclarator()) {
if (Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try)) {
- DefinitionKind = FDK_Definition;
+ DefinitionKind = FunctionDefinitionKind::Definition;
} else if (Tok.is(tok::equal)) {
const Token &KW = NextToken();
if (KW.is(tok::kw_default))
- DefinitionKind = FDK_Defaulted;
+ DefinitionKind = FunctionDefinitionKind::Defaulted;
else if (KW.is(tok::kw_delete))
- DefinitionKind = FDK_Deleted;
+ DefinitionKind = FunctionDefinitionKind::Deleted;
else if (KW.is(tok::code_completion)) {
Actions.CodeCompleteAfterFunctionEquals(DeclaratorInfo);
cutOffParsing();
@@ -2725,13 +2744,14 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// C++11 [dcl.attr.grammar] p4: If an attribute-specifier-seq appertains
// to a friend declaration, that declaration shall be a definition.
if (DeclaratorInfo.isFunctionDeclarator() &&
- DefinitionKind == FDK_Declaration && DS.isFriendSpecified()) {
+ DefinitionKind == FunctionDefinitionKind::Declaration &&
+ DS.isFriendSpecified()) {
// Diagnose attributes that appear before decl specifier:
// [[]] friend int foo();
ProhibitAttributes(FnAttrs);
}
- if (DefinitionKind != FDK_Declaration) {
+ if (DefinitionKind != FunctionDefinitionKind::Declaration) {
if (!DeclaratorInfo.isFunctionDeclarator()) {
Diag(DeclaratorInfo.getIdentifierLoc(), diag::err_func_def_no_params);
ConsumeBrace();
@@ -2781,7 +2801,12 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
InClassInitStyle HasInClassInit = ICIS_NoInit;
bool HasStaticInitializer = false;
if (Tok.isOneOf(tok::equal, tok::l_brace) && PureSpecLoc.isInvalid()) {
- if (DeclaratorInfo.isDeclarationOfFunction()) {
+ // DRXXXX: Anonymous bit-fields cannot have a brace-or-equal-initializer.
+ if (BitfieldSize.isUsable() && !DeclaratorInfo.hasName()) {
+ // Diagnose the error and pretend there is no in-class initializer.
+ Diag(Tok, diag::err_anon_bitfield_member_init);
+ SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+ } else if (DeclaratorInfo.isDeclarationOfFunction()) {
// It's a pure-specifier.
if (!TryConsumePureSpecifier(/*AllowFunctionDefinition*/ false))
// Parse it as an expression so that Sema can diagnose it.
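For illustration, the case the new branch diagnoses (err_anon_bitfield_member_init) before recovering as if no initializer had been written:

    struct S {
      int : 4 = 1;    // error: an anonymous bit-field cannot have an initializer
      int n : 4 = 1;  // OK in C++20: a named bit-field may have a default member init
    };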
@@ -2912,7 +2937,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
break;
if (Tok.isAtStartOfLine() &&
- !MightBeDeclarator(DeclaratorContext::MemberContext)) {
+ !MightBeDeclarator(DeclaratorContext::Member)) {
// This comma was followed by a line-break and something which can't be
// the start of a declarator. The comma was probably a typo for a
// semicolon.
@@ -2930,7 +2955,11 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DeclaratorInfo.setCommaLoc(CommaLoc);
// GNU attributes are allowed before the second and subsequent declarator.
+ // However, this does not apply to [[]] attributes (which could show up
+ // before or after the __attribute__ attributes).
+ DiagnoseAndSkipCXX11Attributes();
MaybeParseGNUAttributes(DeclaratorInfo);
+ DiagnoseAndSkipCXX11Attributes();
if (ParseCXXMemberDeclaratorBeforeInitializer(
DeclaratorInfo, VS, BitfieldSize, LateParsedAttrs))
@@ -3379,7 +3408,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
FPOptionsOverride NewOverrides;
Actions.CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
Actions.FpPragmaStack.Act(Tok.getLocation(), Sema::PSK_Reset, StringRef(),
- 0 /*unused*/);
+ {} /*unused*/);
SourceLocation SavedPrevTokLocation = PrevTokLocation;
ParseLexedPragmas(getCurrentClass());
@@ -3823,8 +3852,8 @@ TypeResult Parser::ParseTrailingReturnType(SourceRange &Range,
ConsumeToken();
return ParseTypeName(&Range, MayBeFollowedByDirectInit
- ? DeclaratorContext::TrailingReturnVarContext
- : DeclaratorContext::TrailingReturnContext);
+ ? DeclaratorContext::TrailingReturnVar
+ : DeclaratorContext::TrailingReturn);
}
/// Parse a requires-clause as part of a function declaration.
@@ -3877,6 +3906,7 @@ void Parser::ParseTrailingRequiresClause(Declarator &D) {
auto &FunctionChunk = D.getFunctionTypeInfo();
FunctionChunk.HasTrailingReturnType = TrailingReturnType.isUsable();
FunctionChunk.TrailingReturnType = TrailingReturnType.get();
+ FunctionChunk.TrailingReturnTypeLoc = Range.getBegin();
} else
SkipUntil({tok::equal, tok::l_brace, tok::arrow, tok::kw_try, tok::comma},
StopAtSemi | StopBeforeMatch);
@@ -4018,6 +4048,8 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
case ParsedAttr::AT_FallThrough:
case ParsedAttr::AT_CXX11NoReturn:
case ParsedAttr::AT_NoUniqueAddress:
+ case ParsedAttr::AT_Likely:
+ case ParsedAttr::AT_Unlikely:
return true;
case ParsedAttr::AT_WarnUnusedResult:
return !ScopeName && AttrName->getName().equals("nodiscard");
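Adding AT_Likely and AT_Unlikely here treats [[likely]] and [[unlikely]] as known standard attributes, so, among other things, repeating one inside a single attribute-specifier is diagnosed. A short illustration:

    int f(int x) {
      if (x > 0) [[likely]]              // OK: recognized standard attribute
        return 1;
      if (x < 0) [[unlikely, unlikely]]  // ill-formed: repeated in one specifier
        return -1;
      return 0;
    }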
@@ -4142,9 +4174,11 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square) &&
"Not a double square bracket attribute list");
- Diag(Tok.getLocation(), diag::warn_cxx98_compat_attribute);
+ SourceLocation OpenLoc = Tok.getLocation();
+ Diag(OpenLoc, diag::warn_cxx98_compat_attribute);
ConsumeBracket();
+ checkCompoundToken(OpenLoc, tok::l_square, CompoundToken::AttrBegin);
ConsumeBracket();
SourceLocation CommonScopeLoc;
@@ -4227,8 +4261,11 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
<< AttrName;
}
+ SourceLocation CloseLoc = Tok.getLocation();
if (ExpectAndConsume(tok::r_square))
SkipUntil(tok::r_square);
+ else if (Tok.is(tok::r_square))
+ checkCompoundToken(CloseLoc, tok::r_square, CompoundToken::AttrEnd);
if (endLoc)
*endLoc = Tok.getLocation();
if (ExpectAndConsume(tok::r_square))
diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp
index 81e87582c6ad..6acf76d713fd 100644
--- a/clang/lib/Parse/ParseExpr.cpp
+++ b/clang/lib/Parse/ParseExpr.cpp
@@ -1007,23 +1007,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
break;
- case tok::annot_uneval_primary_expr:
case tok::annot_primary_expr:
+ case tok::annot_overload_set:
Res = getExprAnnotation(Tok);
- if (SavedKind == tok::annot_uneval_primary_expr) {
- if (Expr *E = Res.get()) {
- if (!E->isTypeDependent() && !E->containsErrors()) {
- // TransformToPotentiallyEvaluated expects that it will still be in a
- // (temporary) unevaluated context and then looks through that context
- // to build it in the surrounding context. So we need to push an
- // unevaluated context to balance things out.
- EnterExpressionEvaluationContext Unevaluated(
- Actions, Sema::ExpressionEvaluationContext::Unevaluated,
- Sema::ReuseLambdaContextDecl);
- Res = Actions.TransformToPotentiallyEvaluated(Res.get());
- }
- }
- }
+ if (!Res.isInvalid() && Tok.getKind() == tok::annot_overload_set)
+ Res = Actions.ActOnNameClassifiedAsOverloadSet(getCurScope(), Res.get());
ConsumeAnnotationToken();
if (!Res.isInvalid() && Tok.is(tok::less))
checkPotentialAngleBracket(Res);
@@ -1223,7 +1211,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
TypeResult Ty = Actions.ActOnTypeName(getCurScope(),
DeclaratorInfo);
if (Ty.isInvalid())
@@ -1481,9 +1469,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_this:
Res = ParseCXXThis();
break;
- case tok::kw___builtin_unique_stable_name:
- Res = ParseUniqueStableNameExpression();
- break;
+
case tok::annot_typename:
if (isStartOfObjCClassMessageMissingOpenBracket()) {
TypeResult Type = getTypeAnnotation(Tok);
@@ -1499,7 +1485,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
if (Ty.isInvalid())
break;
@@ -2275,15 +2261,20 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
if (isTypeIdUnambiguously()) {
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
SourceLocation LParenLoc = PP.getLocForEndOfToken(OpTok.getLocation());
SourceLocation RParenLoc = PP.getLocForEndOfToken(PrevTokLocation);
- Diag(LParenLoc, diag::err_expected_parentheses_around_typename)
- << OpTok.getName()
- << FixItHint::CreateInsertion(LParenLoc, "(")
- << FixItHint::CreateInsertion(RParenLoc, ")");
+ if (LParenLoc.isInvalid() || RParenLoc.isInvalid()) {
+ Diag(OpTok.getLocation(),
+ diag::err_expected_parentheses_around_typename)
+ << OpTok.getName();
+ } else {
+ Diag(LParenLoc, diag::err_expected_parentheses_around_typename)
+ << OpTok.getName() << FixItHint::CreateInsertion(LParenLoc, "(")
+ << FixItHint::CreateInsertion(RParenLoc, ")");
+ }
isCastExpr = true;
return ExprEmpty();
}
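The reworked diagnostic still suggests parenthesizing a type operand of sizeof/alignof, but emits the fix-it hints only when both insertion points are valid; the guarded path presumably covers locations where getLocForEndOfToken fails, such as macro expansions. For example:

    int a = sizeof int;    // error, with fix-its suggesting: sizeof(int)

    #define SZ sizeof int  // hypothetical macro case: the insertion points may be
    int b = SZ;            // invalid, so the error is emitted without fix-its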
@@ -2332,43 +2323,6 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
}
-ExprResult Parser::ParseUniqueStableNameExpression() {
- assert(Tok.is(tok::kw___builtin_unique_stable_name) &&
- "Not __bulitin_unique_stable_name");
-
- SourceLocation OpLoc = ConsumeToken();
- BalancedDelimiterTracker T(*this, tok::l_paren);
-
- // typeid expressions are always parenthesized.
- if (T.expectAndConsume(diag::err_expected_lparen_after,
- "__builtin_unique_stable_name"))
- return ExprError();
-
- if (isTypeIdInParens()) {
- TypeResult Ty = ParseTypeName();
- T.consumeClose();
-
- if (Ty.isInvalid())
- return ExprError();
-
- return Actions.ActOnUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
- T.getCloseLocation(), Ty.get());
- }
-
- EnterExpressionEvaluationContext Unevaluated(
- Actions, Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult Result = ParseExpression();
-
- if (Result.isInvalid()) {
- SkipUntil(tok::r_paren, StopAtSemi);
- return Result;
- }
-
- T.consumeClose();
- return Actions.ActOnUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
- T.getCloseLocation(), Result.get());
-}
-
/// Parse a sizeof or alignof expression.
///
/// \verbatim
@@ -2852,6 +2806,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
Diag(Tok, diag::ext_gnu_statement_expr);
+ checkCompoundToken(OpenLoc, tok::l_paren, CompoundToken::StmtExprBegin);
+
if (!getCurScope()->getFnParent() && !getCurScope()->getBlockParent()) {
Result = ExprError(Diag(OpenLoc, diag::err_stmtexpr_file_scope));
} else {
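The new checkCompoundToken call covers the '({' that opens a GNU statement expression. A minimal sketch of the construct, with the split case the check targets marked as hypothetical:

    int f() {
      int n = ({ int tmp = 2; tmp * 3; });  // GNU extension: value of the last expression
      return n;                             // n == 6
    }

    #define LP (
    int g() { return LP{ int t = 1; t; }); }  // hypothetical: '(' and '{' coming from
                                              // different sources is the split it flags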
@@ -2936,7 +2892,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// Parse the type declarator.
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
// If our type is followed by an identifier and either ':' or ']', then
@@ -3160,6 +3116,7 @@ Parser::ParseCompoundLiteralExpression(ParsedType Ty,
assert(Tok.is(tok::l_brace) && "Not a compound literal!");
if (!getLangOpts().C99) // Compound literals don't exist in C90.
Diag(LParenLoc, diag::ext_c99_compound_literal);
+ PreferredType.enterTypeCast(Tok.getLocation(), Ty.get());
ExprResult Result = ParseInitializer();
if (!Result.isInvalid() && Ty)
return Actions.ActOnCompoundLiteral(LParenLoc, Ty, RParenLoc, Result.get());
@@ -3336,8 +3293,9 @@ ExprResult Parser::ParseFoldExpression(ExprResult LHS,
: diag::ext_fold_expression);
T.consumeClose();
- return Actions.ActOnCXXFoldExpr(T.getOpenLocation(), LHS.get(), Kind,
- EllipsisLoc, RHS.get(), T.getCloseLocation());
+ return Actions.ActOnCXXFoldExpr(getCurScope(), T.getOpenLocation(), LHS.get(),
+ Kind, EllipsisLoc, RHS.get(),
+ T.getCloseLocation());
}
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
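In the fold-expression hunk above, ActOnCXXFoldExpr now also receives the current Scope as its first argument; the fold-expression syntax itself is unchanged C++17. A self-contained refresher:

    template <typename... Ts>
    constexpr auto sum(Ts... xs) {
      return (xs + ... + 0);  // binary right fold; an empty pack yields 0
    }
    static_assert(sum(1, 2, 3) == 6);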
@@ -3461,8 +3419,8 @@ void Parser::ParseBlockId(SourceLocation CaretLoc) {
ParseSpecifierQualifierList(DS);
// Parse the block-declarator.
- Declarator DeclaratorInfo(DS, DeclaratorContext::BlockLiteralContext);
- DeclaratorInfo.setFunctionDefinitionKind(FDK_Definition);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::BlockLiteral);
+ DeclaratorInfo.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
ParseDeclarator(DeclaratorInfo);
MaybeParseGNUAttributes(DeclaratorInfo);
@@ -3500,8 +3458,8 @@ ExprResult Parser::ParseBlockLiteralExpression() {
// Parse the return type if present.
DeclSpec DS(AttrFactory);
- Declarator ParamInfo(DS, DeclaratorContext::BlockLiteralContext);
- ParamInfo.setFunctionDefinitionKind(FDK_Definition);
+ Declarator ParamInfo(DS, DeclaratorContext::BlockLiteral);
+ ParamInfo.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
// FIXME: Since the return type isn't actually parsed, it can't be used to
// fill ParamInfo with an initial valid range, so do it manually.
ParamInfo.SetSourceRange(SourceRange(Tok.getLocation(), Tok.getLocation()));
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index aa35200c33b6..4b5703d79f28 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -926,6 +926,15 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
} else if (Tok.is(tok::kw_this)) {
Kind = LCK_This;
Loc = ConsumeToken();
+ } else if (Tok.isOneOf(tok::amp, tok::equal) &&
+ NextToken().isOneOf(tok::comma, tok::r_square) &&
+ Intro.Default == LCD_None) {
+ // We have a lone "&" or "=" which is either a misplaced capture-default
+ // or the start of a capture (in the "&" case) with the rest of the
+ // capture missing. Both are an error, but a misplaced capture-default
+ // is more likely if we don't already have a capture-default.
+ return Invalid(
+ [&] { Diag(Tok.getLocation(), diag::err_capture_default_first); });
} else {
TryConsumeToken(tok::ellipsis, EllipsisLocs[0]);
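An illustration of the introducers the new branch handles: a capture-default that is not first in the lambda-introducer now gets the targeted err_capture_default_first diagnostic.

    void g() {
      int i = 0;
      auto ok  = [&, i] { return i; };  // capture-default first: well-formed
      auto bad = [i, &] { return i; };  // error: capture default must be first
    }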
@@ -1197,7 +1206,8 @@ addConstexprToLambdaDeclSpecifier(Parser &P, SourceLocation ConstexprLoc,
: diag::warn_cxx14_compat_constexpr_on_lambda);
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
- DS.SetConstexprSpec(CSK_constexpr, ConstexprLoc, PrevSpec, DiagID);
+ DS.SetConstexprSpec(ConstexprSpecKind::Constexpr, ConstexprLoc, PrevSpec,
+ DiagID);
assert(PrevSpec == nullptr && DiagID == 0 &&
"Constexpr cannot have been set previously!");
}
@@ -1210,7 +1220,8 @@ static void addConstevalToLambdaDeclSpecifier(Parser &P,
P.Diag(ConstevalLoc, diag::warn_cxx20_compat_consteval);
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
- DS.SetConstexprSpec(CSK_consteval, ConstevalLoc, PrevSpec, DiagID);
+ DS.SetConstexprSpec(ConstexprSpecKind::Consteval, ConstevalLoc, PrevSpec,
+ DiagID);
if (DiagID != 0)
P.Diag(ConstevalLoc, DiagID) << PrevSpec;
}
@@ -1233,7 +1244,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// Parse lambda-declarator[opt].
DeclSpec DS(AttrFactory);
- Declarator D(DS, DeclaratorContext::LambdaExprContext);
+ Declarator D(DS, DeclaratorContext::LambdaExpr);
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
Actions.PushLambdaScope();
@@ -1257,7 +1268,6 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
<< A.getAttrName()->getName();
};
- // FIXME: Consider allowing this as an extension for GCC compatibiblity.
MultiParseScope TemplateParamScope(*this);
if (Tok.is(tok::less)) {
Diag(Tok, getLangOpts().CPlusPlus20
@@ -1277,13 +1287,23 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Diag(RAngleLoc,
diag::err_lambda_template_parameter_list_empty);
} else {
+ ExprResult RequiresClause;
+ if (TryConsumeToken(tok::kw_requires)) {
+ RequiresClause =
+ Actions.ActOnRequiresClause(ParseConstraintLogicalOrExpression(
+ /*IsTrailingRequiresClause=*/false));
+ if (RequiresClause.isInvalid())
+ SkipUntil({tok::l_brace, tok::l_paren}, StopAtSemi | StopBeforeMatch);
+ }
+
Actions.ActOnLambdaExplicitTemplateParameterList(
- LAngleLoc, TemplateParams, RAngleLoc);
+ LAngleLoc, TemplateParams, RAngleLoc, RequiresClause);
++CurTemplateDepthTracker;
}
}
TypeResult TrailingReturnType;
+ SourceLocation TrailingReturnTypeLoc;
if (Tok.is(tok::l_paren)) {
ParseScope PrototypeScope(this,
Scope::FunctionPrototypeScope |
@@ -1370,6 +1390,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
SourceRange Range;
TrailingReturnType =
ParseTrailingReturnType(Range, /*MayBeFollowedByDirectInit*/ false);
+ TrailingReturnTypeLoc = Range.getBegin();
if (Range.getEnd().isValid())
DeclEndLoc = Range.getEnd();
}
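The requires-clause hunk above lets a lambda's explicit template parameter list carry a C++20 constraint. An assumed-valid usage sketch:

    #include <concepts>

    auto twice = []<typename T> requires std::integral<T> (T x) { return 2 * x; };
    // twice(3) == 6; twice(1.5) is rejected by the requires-clause.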
@@ -1386,7 +1407,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
/*ExceptionSpecTokens*/ nullptr,
/*DeclsInPrototype=*/None, LParenLoc, FunLocalRangeEnd, D,
- TrailingReturnType, &DS),
+ TrailingReturnType, TrailingReturnTypeLoc, &DS),
std::move(Attr), DeclEndLoc);
// Parse requires-clause[opt].
@@ -1546,7 +1567,7 @@ ExprResult Parser::ParseCXXCasts() {
ParseSpecifierQualifierList(DS);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
SourceLocation RAngleBracketLoc = Tok.getLocation();
@@ -1872,7 +1893,7 @@ ExprResult Parser::ParseCXXThis() {
/// In C++1z onwards, the type specifier can also be a template-name.
ExprResult
Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
- Declarator DeclaratorInfo(DS, DeclaratorContext::FunctionalCastContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::FunctionalCast);
ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
assert((Tok.is(tok::l_paren) ||
@@ -2020,9 +2041,8 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
case ConditionOrInitStatement::InitStmtDecl: {
WarnOnInit();
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG =
- ParseSimpleDeclaration(DeclaratorContext::InitStmtContext, DeclEnd,
- attrs, /*RequireSemi=*/true);
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(
+ DeclaratorContext::SelectionInit, DeclEnd, attrs, /*RequireSemi=*/true);
*InitStmt = Actions.ActOnDeclStmt(DG, DeclStart, DeclEnd);
return ParseCXXCondition(nullptr, Loc, CK);
}
@@ -2030,8 +2050,8 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
case ConditionOrInitStatement::ForRangeDecl: {
assert(FRI && "should not parse a for range declaration here");
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG = ParseSimpleDeclaration(
- DeclaratorContext::ForContext, DeclEnd, attrs, false, FRI);
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(DeclaratorContext::ForInit,
+ DeclEnd, attrs, false, FRI);
FRI->LoopVar = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
return Sema::ConditionResult();
}
@@ -2047,7 +2067,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
ParseSpecifierQualifierList(DS, AS_none, DeclSpecContext::DSC_condition);
// declarator
- Declarator DeclaratorInfo(DS, DeclaratorContext::ConditionContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::Condition);
ParseDeclarator(DeclaratorInfo);
// simple-asm-expr[opt]
@@ -2174,19 +2194,22 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
// builtin types
case tok::kw_short:
- DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID, Policy);
+ DS.SetTypeSpecWidth(TypeSpecifierWidth::Short, Loc, PrevSpec, DiagID,
+ Policy);
break;
case tok::kw_long:
- DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec, DiagID, Policy);
+ DS.SetTypeSpecWidth(TypeSpecifierWidth::Long, Loc, PrevSpec, DiagID,
+ Policy);
break;
case tok::kw___int64:
- DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec, DiagID, Policy);
+ DS.SetTypeSpecWidth(TypeSpecifierWidth::LongLong, Loc, PrevSpec, DiagID,
+ Policy);
break;
case tok::kw_signed:
- DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec, DiagID);
+ DS.SetTypeSpecSign(TypeSpecifierSign::Signed, Loc, PrevSpec, DiagID);
break;
case tok::kw_unsigned:
- DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec, DiagID);
+ DS.SetTypeSpecSign(TypeSpecifierSign::Unsigned, Loc, PrevSpec, DiagID);
break;
case tok::kw_void:
DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID, Policy);
@@ -2688,7 +2711,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// Parse the conversion-declarator, which is merely a sequence of
// ptr-operators.
- Declarator D(DS, DeclaratorContext::ConversionIdContext);
+ Declarator D(DS, DeclaratorContext::ConversionId);
ParseDeclaratorInternal(D, /*DirectDeclParser=*/nullptr);
// Finish up the type.
@@ -3046,7 +3069,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
SourceRange TypeIdParens;
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, DeclaratorContext::CXXNewContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::CXXNew);
if (Tok.is(tok::l_paren)) {
// If it turns out to be a placement, we change the type location.
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3368,7 +3391,7 @@ ExprResult Parser::ParseRequiresExpression() {
ParsedAttributes FirstArgAttrs(getAttrFactory());
SourceLocation EllipsisLoc;
llvm::SmallVector<DeclaratorChunk::ParamInfo, 2> LocalParameters;
- ParseParameterDeclarationClause(DeclaratorContext::RequiresExprContext,
+ ParseParameterDeclarationClause(DeclaratorContext::RequiresExpr,
FirstArgAttrs, LocalParameters,
EllipsisLoc);
if (EllipsisLoc.isValid())
@@ -3896,7 +3919,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
if (ParseAs >= CompoundLiteral) {
// Parse the type declarator.
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
{
ColonProtectionRAIIObject InnerColonProtection(*this);
ParseSpecifierQualifierList(DS);
@@ -3974,7 +3997,7 @@ ExprResult Parser::ParseBuiltinBitCast() {
ParseSpecifierQualifierList(DS);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
if (ExpectAndConsume(tok::comma)) {
diff --git a/clang/lib/Parse/ParseObjc.cpp b/clang/lib/Parse/ParseObjc.cpp
index eaea8666bc10..88942ed173d0 100644
--- a/clang/lib/Parse/ParseObjc.cpp
+++ b/clang/lib/Parse/ParseObjc.cpp
@@ -657,7 +657,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
if (Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
SourceLocation DeclEnd;
allTUVariables.push_back(
- ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs));
+ ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs));
continue;
}
@@ -1141,13 +1141,13 @@ bool Parser::isTokIdentifier_in() const {
///
void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context) {
- assert(Context == DeclaratorContext::ObjCParameterContext ||
- Context == DeclaratorContext::ObjCResultContext);
+ assert(Context == DeclaratorContext::ObjCParameter ||
+ Context == DeclaratorContext::ObjCResult);
while (1) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPassingType(getCurScope(), DS,
- Context == DeclaratorContext::ObjCParameterContext);
+ Actions.CodeCompleteObjCPassingType(
+ getCurScope(), DS, Context == DeclaratorContext::ObjCParameter);
return cutOffParsing();
}
@@ -1237,10 +1237,10 @@ static void takeDeclAttributes(ParsedAttributes &attrs,
ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
DeclaratorContext context,
ParsedAttributes *paramAttrs) {
- assert(context == DeclaratorContext::ObjCParameterContext ||
- context == DeclaratorContext::ObjCResultContext);
+ assert(context == DeclaratorContext::ObjCParameter ||
+ context == DeclaratorContext::ObjCResult);
assert((paramAttrs != nullptr) ==
- (context == DeclaratorContext::ObjCParameterContext));
+ (context == DeclaratorContext::ObjCParameter));
assert(Tok.is(tok::l_paren) && "expected (");
@@ -1259,7 +1259,7 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
DeclSpec declSpec(AttrFactory);
declSpec.setObjCQualifiers(&DS);
DeclSpecContext dsContext = DeclSpecContext::DSC_normal;
- if (context == DeclaratorContext::ObjCResultContext)
+ if (context == DeclaratorContext::ObjCResult)
dsContext = DeclSpecContext::DSC_objc_method_result;
ParseSpecifierQualifierList(declSpec, AS_none, dsContext);
Declarator declarator(declSpec, context);
@@ -1281,7 +1281,7 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
// If we're parsing a parameter, steal all the decl attributes
// and add them to the decl spec.
- if (context == DeclaratorContext::ObjCParameterContext)
+ if (context == DeclaratorContext::ObjCParameter)
takeDeclAttributes(*paramAttrs, declarator);
}
}
@@ -1345,8 +1345,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ParsedType ReturnType;
ObjCDeclSpec DSRet;
if (Tok.is(tok::l_paren))
- ReturnType = ParseObjCTypeName(DSRet, DeclaratorContext::ObjCResultContext,
- nullptr);
+ ReturnType =
+ ParseObjCTypeName(DSRet, DeclaratorContext::ObjCResult, nullptr);
// If attributes exist before the method, parse them.
ParsedAttributes methodAttrs(AttrFactory);
@@ -1407,9 +1407,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ArgInfo.Type = nullptr;
if (Tok.is(tok::l_paren)) // Parse the argument type if present.
- ArgInfo.Type = ParseObjCTypeName(ArgInfo.DeclSpec,
- DeclaratorContext::ObjCParameterContext,
- &paramAttrs);
+ ArgInfo.Type = ParseObjCTypeName(
+ ArgInfo.DeclSpec, DeclaratorContext::ObjCParameter, &paramAttrs);
// If attributes exist before the argument name, parse them.
// Regardless, collect all the attributes we've parsed so far.
@@ -1485,7 +1484,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
// Parse the declarator.
- Declarator ParmDecl(DS, DeclaratorContext::PrototypeContext);
+ Declarator ParmDecl(DS, DeclaratorContext::Prototype);
ParseDeclarator(ParmDecl);
IdentifierInfo *ParmII = ParmDecl.getIdentifier();
Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
@@ -1692,7 +1691,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
typeArg, Actions.getASTContext().getPrintingPolicy());
// Form a declarator to turn this into a type.
- Declarator D(DS, DeclaratorContext::TypeNameContext);
+ Declarator D(DS, DeclaratorContext::TypeName);
TypeResult fullTypeArg = Actions.ActOnTypeName(getCurScope(), D);
if (fullTypeArg.isUsable()) {
typeArgs.push_back(fullTypeArg.get());
@@ -2536,7 +2535,7 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
if (Tok.isNot(tok::ellipsis)) {
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
- Declarator ParmDecl(DS, DeclaratorContext::ObjCCatchContext);
+ Declarator ParmDecl(DS, DeclaratorContext::ObjCCatch);
ParseDeclarator(ParmDecl);
// Inform the actions module about the declarator, so it
@@ -2952,7 +2951,7 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
// We have a class message. Turn the simple-type-specifier or
// typename-specifier we parsed into a type and parse the
// remainder of the class message.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
if (Type.isInvalid())
return true;
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index 5223755c8fdf..db7e967b15ae 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -21,6 +21,7 @@
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
@@ -60,6 +61,12 @@ enum OpenMPDirectiveKindEx {
struct OpenMPDirectiveKindExWrapper {
OpenMPDirectiveKindExWrapper(unsigned Value) : Value(Value) {}
OpenMPDirectiveKindExWrapper(OpenMPDirectiveKind DK) : Value(unsigned(DK)) {}
+ bool operator==(OpenMPDirectiveKindExWrapper V) const {
+ return Value == V.Value;
+ }
+ bool operator!=(OpenMPDirectiveKindExWrapper V) const {
+ return Value != V.Value;
+ }
bool operator==(OpenMPDirectiveKind V) const { return Value == unsigned(V); }
bool operator!=(OpenMPDirectiveKind V) const { return Value != unsigned(V); }
bool operator<(OpenMPDirectiveKind V) const { return Value < unsigned(V); }
@@ -115,7 +122,9 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
// TODO: add other combined directives in topological order.
static const OpenMPDirectiveKindExWrapper F[][3] = {
{OMPD_begin, OMPD_declare, OMPD_begin_declare},
+ {OMPD_begin, OMPD_assumes, OMPD_begin_assumes},
{OMPD_end, OMPD_declare, OMPD_end_declare},
+ {OMPD_end, OMPD_assumes, OMPD_end_assumes},
{OMPD_cancellation, OMPD_point, OMPD_cancellation_point},
{OMPD_declare, OMPD_reduction, OMPD_declare_reduction},
{OMPD_declare, OMPD_mapper, OMPD_declare_mapper},
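The two new rows make the token pairs 'begin assumes' and 'end assumes' parse as single directives. A hedged sketch of the corresponding OpenMP 5.1 spelling (the assumption clause name is an assumption here):

    #pragma omp begin assumes no_openmp_routines
    void saxpy(float a, const float *x, float *y, int n);  // declared under the assumption
    #pragma omp end assumes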
@@ -297,8 +306,7 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
do {
ColonProtectionRAIIObject ColonRAII(*this);
SourceRange Range;
- TypeResult TR =
- ParseTypeName(&Range, DeclaratorContext::PrototypeContext, AS);
+ TypeResult TR = ParseTypeName(&Range, DeclaratorContext::Prototype, AS);
if (TR.isUsable()) {
QualType ReductionType =
Actions.ActOnOpenMPDeclareReductionType(Range.getBegin(), TR);
@@ -568,9 +576,6 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
}
// Enter scope.
- OMPDeclareMapperDecl *DMD = Actions.ActOnOpenMPDeclareMapperDirectiveStart(
- getCurScope(), Actions.getCurLexicalContext(), MapperId, MapperType,
- Range.getBegin(), VName, AS);
DeclarationNameInfo DirName;
SourceLocation Loc = Tok.getLocation();
unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
@@ -579,8 +584,8 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
Actions.StartOpenMPDSABlock(OMPD_declare_mapper, DirName, getCurScope(), Loc);
// Add the mapper variable declaration.
- Actions.ActOnOpenMPDeclareMapperDirectiveVarDecl(
- DMD, getCurScope(), MapperType, Range.getBegin(), VName);
+ ExprResult MapperVarRef = Actions.ActOnOpenMPDeclareMapperDirectiveVarDecl(
+ getCurScope(), MapperType, Range.getBegin(), VName);
// Parse map clauses.
SmallVector<OMPClause *, 6> Clauses;
@@ -590,7 +595,7 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
OMPClause *Clause =
- ParseOpenMPClause(OMPD_declare_mapper, CKind, Clauses.size() == 0);
+ ParseOpenMPClause(OMPD_declare_mapper, CKind, Clauses.empty());
if (Clause)
Clauses.push_back(Clause);
else
@@ -609,12 +614,13 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
// Exit scope.
Actions.EndOpenMPDSABlock(nullptr);
OMPDirectiveScope.Exit();
-
- DeclGroupPtrTy DGP =
- Actions.ActOnOpenMPDeclareMapperDirectiveEnd(DMD, getCurScope(), Clauses);
+ DeclGroupPtrTy DG = Actions.ActOnOpenMPDeclareMapperDirective(
+ getCurScope(), Actions.getCurLexicalContext(), MapperId, MapperType,
+ Range.getBegin(), VName, AS, MapperVarRef.get(), Clauses);
if (!IsCorrect)
return DeclGroupPtrTy();
- return DGP;
+
+ return DG;
}
TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
@@ -626,7 +632,7 @@ TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
ParseSpecifierQualifierList(DS, AS, DSC);
// Parse the declarator.
- DeclaratorContext Context = DeclaratorContext::PrototypeContext;
+ DeclaratorContext Context = DeclaratorContext::Prototype;
Declarator DeclaratorInfo(DS, Context);
ParseDeclarator(DeclaratorInfo);
Range = DeclaratorInfo.getSourceRange();
@@ -855,21 +861,22 @@ static bool checkForDuplicates(Parser &P, StringRef Name,
}
} // namespace
-void Parser::parseOMPTraitPropertyKind(
- OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set,
- llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen) {
+void Parser::parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
+ llvm::omp::TraitSet Set,
+ llvm::omp::TraitSelector Selector,
+ llvm::StringMap<SourceLocation> &Seen) {
TIProperty.Kind = TraitProperty::invalid;
SourceLocation NameLoc = Tok.getLocation();
- StringRef Name =
- getNameFromIdOrString(*this, Tok, CONTEXT_TRAIT_LVL);
+ StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_TRAIT_LVL);
if (Name.empty()) {
Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
<< CONTEXT_TRAIT_LVL << listOpenMPContextTraitProperties(Set, Selector);
return;
}
- TIProperty.Kind = getOpenMPContextTraitPropertyKind(Set, Name);
+ TIProperty.RawString = Name;
+ TIProperty.Kind = getOpenMPContextTraitPropertyKind(Set, Selector, Name);
if (TIProperty.Kind != TraitProperty::invalid) {
if (checkForDuplicates(*this, Name, NameLoc, Seen, CONTEXT_TRAIT_LVL))
TIProperty.Kind = TraitProperty::invalid;
@@ -910,7 +917,7 @@ void Parser::parseOMPTraitPropertyKind(
{TraitSet::construct, TraitSet::user, TraitSet::implementation,
TraitSet::device}) {
TraitProperty PropertyForName =
- getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ getOpenMPContextTraitPropertyKind(PotentialSet, Selector, Name);
if (PropertyForName == TraitProperty::invalid)
continue;
Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
@@ -936,6 +943,14 @@ static bool checkExtensionProperty(Parser &P, SourceLocation Loc,
if (TIProperty.Kind == TraitProperty::invalid)
return false;
+ if (TIProperty.Kind ==
+ TraitProperty::implementation_extension_disable_implicit_base)
+ return true;
+
+ if (TIProperty.Kind ==
+ TraitProperty::implementation_extension_allow_templates)
+ return true;
+
auto IsMatchExtension = [](OMPTraitProperty &TP) {
return (TP.Kind ==
llvm::omp::TraitProperty::implementation_extension_match_all ||
@@ -949,8 +964,8 @@ static bool checkExtensionProperty(Parser &P, SourceLocation Loc,
for (OMPTraitProperty &SeenProp : TISelector.Properties)
if (IsMatchExtension(SeenProp)) {
P.Diag(Loc, diag::err_omp_variant_ctx_second_match_extension);
- StringRef SeenName =
- llvm::omp::getOpenMPContextTraitPropertyName(SeenProp.Kind);
+ StringRef SeenName = llvm::omp::getOpenMPContextTraitPropertyName(
+ SeenProp.Kind, SeenProp.RawString);
SourceLocation SeenLoc = Seen[SeenName];
P.Diag(SeenLoc, diag::note_omp_declare_variant_ctx_used_here)
<< CONTEXT_TRAIT_LVL << SeenName;
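disable_implicit_base and allow_templates, the two properties exempted early above from the match-extension bookkeeping, are Clang-specific 'implementation' extension traits. An illustrative context selector, assuming the trait names from the hunk:

    #pragma omp begin declare variant match(implementation = {extension(allow_templates)})
    template <typename T> T twice(T x) { return x + x; }  // templates permitted in this range
    #pragma omp end declare variant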
@@ -995,11 +1010,13 @@ void Parser::parseOMPContextProperty(OMPTraitSelector &TISelector,
}
Diag(PropertyLoc, diag::warn_omp_ctx_incompatible_property_for_selector)
- << getOpenMPContextTraitPropertyName(TIProperty.Kind)
+ << getOpenMPContextTraitPropertyName(TIProperty.Kind,
+ TIProperty.RawString)
<< getOpenMPContextTraitSelectorName(TISelector.Kind)
<< getOpenMPContextTraitSetName(Set);
Diag(PropertyLoc, diag::note_omp_ctx_compatible_set_and_selector_for_property)
- << getOpenMPContextTraitPropertyName(TIProperty.Kind)
+ << getOpenMPContextTraitPropertyName(TIProperty.Kind,
+ TIProperty.RawString)
<< getOpenMPContextTraitSelectorName(
getOpenMPContextTraitSelectorForProperty(TIProperty.Kind))
<< getOpenMPContextTraitSetName(
@@ -1008,14 +1025,13 @@ void Parser::parseOMPContextProperty(OMPTraitSelector &TISelector,
<< CONTEXT_TRAIT_LVL;
}
-void Parser::parseOMPTraitSelectorKind(
- OMPTraitSelector &TISelector, llvm::omp::TraitSet Set,
- llvm::StringMap<SourceLocation> &Seen) {
+void Parser::parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
+ llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &Seen) {
TISelector.Kind = TraitSelector::invalid;
SourceLocation NameLoc = Tok.getLocation();
- StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_LVL
- );
+ StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_LVL);
if (Name.empty()) {
Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
<< CONTEXT_SELECTOR_LVL << listOpenMPContextTraitSelectors(Set);
@@ -1045,8 +1061,8 @@ void Parser::parseOMPTraitSelectorKind(
for (const auto &PotentialSet :
{TraitSet::construct, TraitSet::user, TraitSet::implementation,
TraitSet::device}) {
- TraitProperty PropertyForName =
- getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ TraitProperty PropertyForName = getOpenMPContextTraitPropertyKind(
+ PotentialSet, TraitSelector::invalid, Name);
if (PropertyForName == TraitProperty::invalid)
continue;
Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
@@ -1140,7 +1156,8 @@ void Parser::parseOMPContextSelector(
if (!RequiresProperty) {
TISelector.Properties.push_back(
- {getOpenMPContextTraitPropertyForSelector(TISelector.Kind)});
+ {getOpenMPContextTraitPropertyForSelector(TISelector.Kind),
+ getOpenMPContextTraitSelectorName(TISelector.Kind)});
return;
}
@@ -1157,7 +1174,8 @@ void Parser::parseOMPContextSelector(
if (!Condition.isUsable())
return FinishSelector();
TISelector.ScoreOrCondition = Condition.get();
- TISelector.Properties.push_back({TraitProperty::user_condition_unknown});
+ TISelector.Properties.push_back(
+ {TraitProperty::user_condition_unknown, "<condition>"});
return;
}
@@ -1199,8 +1217,7 @@ void Parser::parseOMPTraitSetKind(OMPTraitSet &TISet,
TISet.Kind = TraitSet::invalid;
SourceLocation NameLoc = Tok.getLocation();
- StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_SET_LVL
- );
+ StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_SET_LVL);
if (Name.empty()) {
Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
<< CONTEXT_SELECTOR_SET_LVL << listOpenMPContextTraitSets();
@@ -1236,8 +1253,8 @@ void Parser::parseOMPTraitSetKind(OMPTraitSet &TISet,
for (const auto &PotentialSet :
{TraitSet::construct, TraitSet::user, TraitSet::implementation,
TraitSet::device}) {
- TraitProperty PropertyForName =
- getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ TraitProperty PropertyForName = getOpenMPContextTraitPropertyKind(
+ PotentialSet, TraitSelector::invalid, Name);
if (PropertyForName == TraitProperty::invalid)
continue;
Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
@@ -1258,8 +1275,7 @@ void Parser::parseOMPTraitSetKind(OMPTraitSet &TISet,
///
/// <trait-set-selector-name> '=' '{' <trait-selector> [, <trait-selector>]* '}'
void Parser::parseOMPContextSelectorSet(
- OMPTraitSet &TISet,
- llvm::StringMap<SourceLocation> &SeenSets) {
+ OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets) {
auto OuterBC = BraceCount;
// If anything went wrong we issue an error or warning and then skip the rest
@@ -1335,7 +1351,7 @@ void Parser::parseOMPContextSelectorSet(
/// Parse OpenMP context selectors:
///
/// <trait-set-selector> [, <trait-set-selector>]*
-bool Parser::parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo& TI) {
+bool Parser::parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI) {
llvm::StringMap<SourceLocation> SeenSets;
do {
OMPTraitSet TISet;
@@ -1382,8 +1398,10 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
return;
}
- OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
- if (parseOMPDeclareVariantMatchClause(Loc, TI))
+ OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope();
+ ASTContext &ASTCtx = Actions.getASTContext();
+ OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
+ if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI))
return;
Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
@@ -1404,7 +1422,8 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
}
bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
- OMPTraitInfo &TI) {
+ OMPTraitInfo &TI,
+ OMPTraitInfo *ParentTI) {
// Parse 'match'.
OpenMPClauseKind CKind = Tok.isAnnotation()
? OMPC_unknown
@@ -1435,9 +1454,166 @@ bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
// Parse ')'
(void)T.consumeClose();
+
+ if (!ParentTI)
+ return false;
+
+ // Merge the parent/outer trait info into the one we just parsed and diagnose
+ // problems.
+ // TODO: Keep some source location in the TI to provide better diagnostics.
+ // TODO: Perform some kind of equivalence check on the condition and score
+ // expressions.
+ for (const OMPTraitSet &ParentSet : ParentTI->Sets) {
+ bool MergedSet = false;
+ for (OMPTraitSet &Set : TI.Sets) {
+ if (Set.Kind != ParentSet.Kind)
+ continue;
+ MergedSet = true;
+ for (const OMPTraitSelector &ParentSelector : ParentSet.Selectors) {
+ bool MergedSelector = false;
+ for (OMPTraitSelector &Selector : Set.Selectors) {
+ if (Selector.Kind != ParentSelector.Kind)
+ continue;
+ MergedSelector = true;
+ for (const OMPTraitProperty &ParentProperty :
+ ParentSelector.Properties) {
+ bool MergedProperty = false;
+ for (OMPTraitProperty &Property : Selector.Properties) {
+ // Ignore "equivalent" properties.
+ if (Property.Kind != ParentProperty.Kind)
+ continue;
+
+ // If the kind is the same but the raw string is not, the property has
+ // not really been merged and must not be skipped.
+ MergedProperty |= Property.RawString == ParentProperty.RawString;
+
+ if (Property.RawString == ParentProperty.RawString &&
+ Selector.ScoreOrCondition == ParentSelector.ScoreOrCondition)
+ continue;
+
+ if (Selector.Kind == llvm::omp::TraitSelector::user_condition) {
+ Diag(Loc, diag::err_omp_declare_variant_nested_user_condition);
+ } else if (Selector.ScoreOrCondition !=
+ ParentSelector.ScoreOrCondition) {
+ Diag(Loc, diag::err_omp_declare_variant_duplicate_nested_trait)
+ << getOpenMPContextTraitPropertyName(
+ ParentProperty.Kind, ParentProperty.RawString)
+ << getOpenMPContextTraitSelectorName(ParentSelector.Kind)
+ << getOpenMPContextTraitSetName(ParentSet.Kind);
+ }
+ }
+ if (!MergedProperty)
+ Selector.Properties.push_back(ParentProperty);
+ }
+ }
+ if (!MergedSelector)
+ Set.Selectors.push_back(ParentSelector);
+ }
+ }
+ if (!MergedSet)
+ TI.Sets.push_back(ParentSet);
+ }
+
return false;
}
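For orientation, a minimal sketch of the nesting the merge above handles
(assuming OpenMP 5.1 `begin declare variant` syntax). The inner `match`
clause inherits the enclosing region's traits, so the variant below
effectively matches both the device and the implementation selectors;
`foo_cpu` is an illustrative variant name, not one from this patch:

  #pragma omp begin declare variant match(device = {kind(cpu)})
  #pragma omp declare variant(foo_cpu) match(implementation = {vendor(llvm)})
  void foo(void);
  #pragma omp end declare variant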
+/// `omp assumes` or `omp begin/end assumes` <clause> [[,]<clause>]...
+/// where
+///
+/// clause:
+/// 'ext_IMPL_DEFINED'
+/// 'absent' '(' directive-name [, directive-name]* ')'
+/// 'contains' '(' directive-name [, directive-name]* ')'
+/// 'holds' '(' scalar-expression ')'
+/// 'no_openmp'
+/// 'no_openmp_routines'
+/// 'no_parallelism'
+///
+void Parser::ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
+ SourceLocation Loc) {
+ SmallVector<StringRef, 4> Assumptions;
+ bool SkippedClauses = false;
+
+ auto SkipBraces = [&](llvm::StringRef Spelling, bool IssueNote) {
+ BalancedDelimiterTracker T(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, Spelling.data()))
+ return;
+ T.skipToEnd();
+ if (IssueNote && T.getCloseLocation().isValid())
+ Diag(T.getCloseLocation(),
+ diag::note_omp_assumption_clause_continue_here);
+ };
+
+ /// Helper to determine which AssumptionClauseMapping (ACM) in the
+ /// AssumptionClauseMappings table matches \p RawString. The return value is
+ /// the index of the matching ACM in the table, or -1 if there was no match.
+ auto MatchACMClause = [&](StringRef RawString) {
+ llvm::StringSwitch<int> SS(RawString);
+ unsigned ACMIdx = 0;
+ for (const AssumptionClauseMappingInfo &ACMI : AssumptionClauseMappings) {
+ if (ACMI.StartsWith)
+ SS.StartsWith(ACMI.Identifier, ACMIdx++);
+ else
+ SS.Case(ACMI.Identifier, ACMIdx++);
+ }
+ return SS.Default(-1);
+ };
+
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ IdentifierInfo *II = nullptr;
+ SourceLocation StartLoc = Tok.getLocation();
+ int Idx = -1;
+ if (Tok.isAnyIdentifier()) {
+ II = Tok.getIdentifierInfo();
+ Idx = MatchACMClause(II->getName());
+ }
+ ConsumeAnyToken();
+
+ bool NextIsLPar = Tok.is(tok::l_paren);
+ // Handle unknown clauses by skipping them.
+ if (Idx == -1) {
+ Diag(StartLoc, diag::warn_omp_unknown_assumption_clause_missing_id)
+ << llvm::omp::getOpenMPDirectiveName(DKind)
+ << llvm::omp::getAllAssumeClauseOptions() << NextIsLPar;
+ if (NextIsLPar)
+ SkipBraces(II ? II->getName() : "", /* IssueNote */ true);
+ SkippedClauses = true;
+ continue;
+ }
+ const AssumptionClauseMappingInfo &ACMI = AssumptionClauseMappings[Idx];
+ if (ACMI.HasDirectiveList || ACMI.HasExpression) {
+ // TODO: We ignore absent, contains, and holds assumptions for now. We
+ // also do not verify the content in the parentheses at all.
+ SkippedClauses = true;
+ SkipBraces(II->getName(), /* IssueNote */ false);
+ continue;
+ }
+
+ if (NextIsLPar) {
+ Diag(Tok.getLocation(),
+ diag::warn_omp_unknown_assumption_clause_without_args)
+ << II;
+ SkipBraces(II->getName(), /* IssueNote */ true);
+ }
+
+ assert(II && "Expected an identifier clause!");
+ StringRef Assumption = II->getName();
+ if (ACMI.StartsWith)
+ Assumption = Assumption.substr(ACMI.Identifier.size());
+ Assumptions.push_back(Assumption);
+ }
+
+ Actions.ActOnOpenMPAssumesDirective(Loc, DKind, Assumptions, SkippedClauses);
+}
+
+void Parser::ParseOpenMPEndAssumesDirective(SourceLocation Loc) {
+ if (Actions.isInOpenMPAssumeScope())
+ Actions.ActOnOpenMPEndAssumesDirective();
+ else
+ Diag(Loc, diag::err_expected_begin_assumes);
+}
+
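A usage sketch for the two parsers above: only argument-free clauses are
recorded as assumptions, while `absent`, `contains`, and `holds` are skipped
for now, as the TODO notes. The `ext_` clause here is a hypothetical
implementation-defined assumption, not one defined by this patch:

  #pragma omp assumes no_openmp_routines ext_no_aliasing
  #pragma omp begin assumes no_parallelism
  void kernel(void);
  #pragma omp end assumes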
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
@@ -1449,16 +1625,16 @@ bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
/// device_type-clause:
/// 'device_type' '(' 'host' | 'nohost' | 'any' )'
namespace {
- struct SimpleClauseData {
- unsigned Type;
- SourceLocation Loc;
- SourceLocation LOpen;
- SourceLocation TypeLoc;
- SourceLocation RLoc;
- SimpleClauseData(unsigned Type, SourceLocation Loc, SourceLocation LOpen,
- SourceLocation TypeLoc, SourceLocation RLoc)
- : Type(Type), Loc(Loc), LOpen(LOpen), TypeLoc(TypeLoc), RLoc(RLoc) {}
- };
+struct SimpleClauseData {
+ unsigned Type;
+ SourceLocation Loc;
+ SourceLocation LOpen;
+ SourceLocation TypeLoc;
+ SourceLocation RLoc;
+ SimpleClauseData(unsigned Type, SourceLocation Loc, SourceLocation LOpen,
+ SourceLocation TypeLoc, SourceLocation RLoc)
+ : Type(Type), Loc(Loc), LOpen(LOpen), TypeLoc(TypeLoc), RLoc(RLoc) {}
+};
} // anonymous namespace
static Optional<SimpleClauseData>
@@ -1473,7 +1649,8 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
return llvm::None;
unsigned Type = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : P.getPreprocessor().getSpelling(Tok));
+ Kind, Tok.isAnnotation() ? "" : P.getPreprocessor().getSpelling(Tok),
+ P.getLangOpts().OpenMP);
SourceLocation TypeLoc = Tok.getLocation();
if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
Tok.isNot(tok::annot_pragma_openmp_end))
@@ -1521,7 +1698,7 @@ Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
Diag(DevTypeData.getValue().Loc,
diag::warn_omp_more_one_device_type_clause);
}
- switch(static_cast<OpenMPDeviceType>(DevTypeData.getValue().Type)) {
+ switch (static_cast<OpenMPDeviceType>(DevTypeData.getValue().Type)) {
case OMPC_DEVICE_TYPE_any:
DT = OMPDeclareTargetDeclAttr::DT_Any;
break;
@@ -1645,6 +1822,14 @@ void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
/// annot_pragma_openmp 'requires' <clause> [[[,] <clause>] ... ]
/// annot_pragma_openmp_end
///
+/// assumes directive:
+/// annot_pragma_openmp 'assumes' <clause> [[[,] <clause>] ... ]
+/// annot_pragma_openmp_end
+/// or
+/// annot_pragma_openmp 'begin assumes' <clause> [[[,] <clause>] ... ]
+/// annot_pragma_openmp 'end assumes'
+/// annot_pragma_openmp_end
+///
Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed,
DeclSpec::TST TagType, Decl *Tag) {
@@ -1782,6 +1967,13 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ConsumeAnnotationToken();
return Actions.ActOnOpenMPRequiresDirective(StartLoc, Clauses);
}
+ case OMPD_assumes:
+ case OMPD_begin_assumes:
+ ParseOpenMPAssumesDirective(DKind, ConsumeToken());
+ break;
+ case OMPD_end_assumes:
+ ParseOpenMPEndAssumesDirective(ConsumeToken());
+ break;
case OMPD_declare_reduction:
ConsumeToken();
if (DeclGroupPtrTy Res = ParseOpenMPDeclareReductionDirective(AS)) {
@@ -1807,8 +1999,10 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
// { #pragma omp end declare variant }
//
ConsumeToken();
- OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
- if (parseOMPDeclareVariantMatchClause(Loc, TI))
+ OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope();
+ ASTContext &ASTCtx = Actions.getASTContext();
+ OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
+ if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI))
break;
// Skip last tokens.
@@ -1817,10 +2011,16 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
VariantMatchInfo VMI;
- ASTContext &ASTCtx = Actions.getASTContext();
TI.getAsVariantMatchInfo(ASTCtx, VMI);
- OMPContext OMPCtx(ASTCtx.getLangOpts().OpenMPIsDevice,
- ASTCtx.getTargetInfo().getTriple());
+
+ std::function<void(StringRef)> DiagUnknownTrait = [this, Loc](
+ StringRef ISATrait) {
+ // TODO: Track the selector locations in a way that is accessible here to
+ // improve the diagnostic location.
+ Diag(Loc, diag::warn_unknown_begin_declare_variant_isa_trait) << ISATrait;
+ };
+ TargetOMPContext OMPCtx(ASTCtx, std::move(DiagUnknownTrait),
+ /* CurrentFunctionDecl */ nullptr);
if (isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ true)) {
Actions.ActOnOpenMPBeginDeclareVariant(Loc, TI);
@@ -1866,7 +2066,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
CachedTokens Toks;
Toks.push_back(Tok);
ConsumeToken();
- while(Tok.isNot(tok::annot_pragma_openmp_end)) {
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
Toks.push_back(Tok);
ConsumeAnyToken();
}
@@ -1914,7 +2114,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
return DeclGroupPtrTy();
ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
- llvm::SmallVector<Decl *, 4> Decls;
+ llvm::SmallVector<Decl *, 4> Decls;
DKind = parseOpenMPDirectiveKind(*this);
while (DKind != OMPD_end_declare_target && Tok.isNot(tok::eof) &&
Tok.isNot(tok::r_brace)) {
@@ -2374,8 +2574,8 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
//
bool Parser::ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
- const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
- Callback,
+ const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)>
+ &Callback,
bool AllowScopeSpecifier) {
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
@@ -2505,8 +2705,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// Check if clause is allowed for the given directive.
if (CKind != OMPC_unknown &&
!isAllowedClauseForDirective(DKind, CKind, getLangOpts().OpenMP)) {
- Diag(Tok, diag::err_omp_unexpected_clause) << getOpenMPClauseName(CKind)
- << getOpenMPDirectiveName(DKind);
+ Diag(Tok, diag::err_omp_unexpected_clause)
+ << getOpenMPClauseName(CKind) << getOpenMPDirectiveName(DKind);
ErrorFound = true;
WrongDirective = true;
}
@@ -2702,8 +2902,8 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
return ExprError();
SourceLocation ELoc = Tok.getLocation();
- ExprResult LHS(ParseCastExpression(AnyCastExpr, IsAddressOfOperand,
- NotTypeCast));
+ ExprResult LHS(
+ ParseCastExpression(AnyCastExpr, IsAddressOfOperand, NotTypeCast));
ExprResult Val(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc, /*DiscardedValue*/ false);
@@ -2871,7 +3071,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
Arg[Modifier2] = OMPC_SCHEDULE_MODIFIER_unknown;
Arg[ScheduleKind] = OMPC_SCHEDULE_unknown;
unsigned KindModifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts().OpenMP);
if (KindModifier > OMPC_SCHEDULE_unknown) {
// Parse 'modifier'
Arg[Modifier1] = KindModifier;
@@ -2883,7 +3084,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
// Parse ',' 'modifier'
ConsumeAnyToken();
KindModifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts().OpenMP);
Arg[Modifier2] = KindModifier > OMPC_SCHEDULE_unknown
? KindModifier
: (unsigned)OMPC_SCHEDULE_unknown;
@@ -2898,7 +3100,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
else
Diag(Tok, diag::warn_pragma_expected_colon) << "schedule modifier";
KindModifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts().OpenMP);
}
Arg[ScheduleKind] = KindModifier;
KLoc[ScheduleKind] = Tok.getLocation();
@@ -2912,7 +3115,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
DelimLoc = ConsumeAnyToken();
} else if (Kind == OMPC_dist_schedule) {
Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts().OpenMP));
KLoc.push_back(Tok.getLocation());
if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
Tok.isNot(tok::annot_pragma_openmp_end))
@@ -2922,7 +3126,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
} else if (Kind == OMPC_defaultmap) {
// Get a defaultmap modifier
unsigned Modifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts().OpenMP);
// Set defaultmap modifier to unknown if it is either scalar, aggregate, or
// pointer
if (Modifier < OMPC_DEFAULTMAP_MODIFIER_unknown)
@@ -2940,7 +3145,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
Diag(Tok, diag::warn_pragma_expected_colon) << "defaultmap modifier";
// Get a defaultmap kind
Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts().OpenMP));
KLoc.push_back(Tok.getLocation());
if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
Tok.isNot(tok::annot_pragma_openmp_end))
@@ -2955,7 +3161,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
NextToken().is(tok::colon)) {
// Parse optional <device modifier> ':'
Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts().OpenMP));
KLoc.push_back(Tok.getLocation());
ConsumeAnyToken();
// Parse ':'
@@ -3057,14 +3264,16 @@ static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
}
/// Checks if the token is a valid map-type-modifier.
+/// FIXME: It will return an OpenMPMapClauseKind if that's what it parses.
static OpenMPMapModifierKind isMapModifier(Parser &P) {
Token Tok = P.getCurToken();
if (!Tok.is(tok::identifier))
return OMPC_MAP_MODIFIER_unknown;
Preprocessor &PP = P.getPreprocessor();
- OpenMPMapModifierKind TypeModifier = static_cast<OpenMPMapModifierKind>(
- getOpenMPSimpleClauseType(OMPC_map, PP.getSpelling(Tok)));
+ OpenMPMapModifierKind TypeModifier =
+ static_cast<OpenMPMapModifierKind>(getOpenMPSimpleClauseType(
+ OMPC_map, PP.getSpelling(Tok), P.getLangOpts().OpenMP));
return TypeModifier;
}
@@ -3099,12 +3308,14 @@ bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
/// Parse map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
-/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
+/// where, map-type-modifier ::= always | close | mapper(mapper-identifier) |
+/// present
bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
while (getCurToken().isNot(tok::colon)) {
OpenMPMapModifierKind TypeModifier = isMapModifier(*this);
if (TypeModifier == OMPC_MAP_MODIFIER_always ||
- TypeModifier == OMPC_MAP_MODIFIER_close) {
+ TypeModifier == OMPC_MAP_MODIFIER_close ||
+ TypeModifier == OMPC_MAP_MODIFIER_present) {
Data.MapTypeModifiers.push_back(TypeModifier);
Data.MapTypeModifiersLoc.push_back(Tok.getLocation());
ConsumeToken();
@@ -3126,7 +3337,8 @@ bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
// Potential map-type token as it is followed by a colon.
if (PP.LookAhead(0).is(tok::colon))
return false;
- Diag(Tok, diag::err_omp_unknown_map_type_modifier);
+ Diag(Tok, diag::err_omp_unknown_map_type_modifier)
+ << (getLangOpts().OpenMP >= 51 ? 1 : 0);
ConsumeToken();
}
if (getCurToken().is(tok::comma))
@@ -3136,14 +3348,16 @@ bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
}
/// Checks if the token is a valid map-type.
+/// FIXME: It will return an OpenMPMapModifierKind if that's what it parses.
static OpenMPMapClauseKind isMapType(Parser &P) {
Token Tok = P.getCurToken();
// The map-type token can be either an identifier or the C++ delete keyword.
if (!Tok.isOneOf(tok::identifier, tok::kw_delete))
return OMPC_MAP_unknown;
Preprocessor &PP = P.getPreprocessor();
- OpenMPMapClauseKind MapType = static_cast<OpenMPMapClauseKind>(
- getOpenMPSimpleClauseType(OMPC_map, PP.getSpelling(Tok)));
+ OpenMPMapClauseKind MapType =
+ static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ OMPC_map, PP.getSpelling(Tok), P.getLangOpts().OpenMP));
return MapType;
}
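A hedged example of the map syntax these two classifiers handle, including
the newly accepted `present` map-type modifier (OpenMP 5.1):

  #pragma omp target map(always, close, present, to : a) map(from : b)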
@@ -3164,7 +3378,6 @@ static void parseMapType(Parser &P, Parser::OpenMPVarListDataTy &Data) {
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
-/// \param RLoc Returned location of right paren.
ExprResult Parser::ParseOpenMPIteratorsExpr() {
assert(Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator" &&
"Expected 'iterator' token.");
@@ -3288,7 +3501,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
bool HasIterator = false;
bool NeedRParenForLinear = false;
BalancedDelimiterTracker LinearT(*this, tok::l_paren,
- tok::annot_pragma_openmp_end);
+ tok::annot_pragma_openmp_end);
// Handle reduction-identifier for reduction clause.
if (Kind == OMPC_reduction || Kind == OMPC_task_reduction ||
Kind == OMPC_in_reduction) {
@@ -3297,7 +3510,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
(Tok.is(tok::identifier) || Tok.is(tok::kw_default)) &&
NextToken().is(tok::comma)) {
// Parse optional reduction modifier.
- Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
+ Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok),
+ getLangOpts().OpenMP);
Data.ExtraModifierLoc = Tok.getLocation();
ConsumeToken();
assert(Tok.is(tok::comma) && "Expected comma.");
@@ -3342,7 +3556,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Handle dependency type for depend clause.
ColonProtectionRAIIObject ColonRAII(*this);
Data.ExtraModifier = getOpenMPSimpleClauseType(
- Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : "");
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : "",
+ getLangOpts().OpenMP);
Data.ExtraModifierLoc = Tok.getLocation();
if (Data.ExtraModifier == OMPC_DEPEND_unknown) {
SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
@@ -3367,7 +3582,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Try to parse modifier if any.
Data.ExtraModifier = OMPC_LINEAR_val;
if (Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::l_paren)) {
- Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
+ Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok),
+ getLangOpts().OpenMP);
Data.ExtraModifierLoc = ConsumeToken();
LinearT.consumeOpen();
NeedRParenForLinear = true;
@@ -3380,7 +3596,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if ((getLangOpts().OpenMP >= 50 && !isOpenMPDistributeDirective(DKind) &&
!isOpenMPTaskLoopDirective(DKind)) &&
Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::colon)) {
- Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
+ Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok),
+ getLangOpts().OpenMP);
Data.ExtraModifierLoc = Tok.getLocation();
ConsumeToken();
assert(Tok.is(tok::colon) && "Expected colon.");
@@ -3400,7 +3617,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
TentativeParsingAction TPA(*this);
bool ColonPresent = false;
if (SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch)) {
+ StopBeforeMatch)) {
if (Tok.is(tok::colon))
ColonPresent = true;
}
@@ -3422,34 +3639,43 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (Tok.is(tok::colon))
Data.ColonLoc = ConsumeToken();
} else if (Kind == OMPC_to || Kind == OMPC_from) {
- if (Tok.is(tok::identifier)) {
- bool IsMapperModifier = false;
- if (Kind == OMPC_to) {
- auto Modifier = static_cast<OpenMPToModifierKind>(
- getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)));
- if (Modifier == OMPC_TO_MODIFIER_mapper)
- IsMapperModifier = true;
- } else {
- auto Modifier = static_cast<OpenMPFromModifierKind>(
- getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)));
- if (Modifier == OMPC_FROM_MODIFIER_mapper)
- IsMapperModifier = true;
+ while (Tok.is(tok::identifier)) {
+ auto Modifier =
+ static_cast<OpenMPMotionModifierKind>(getOpenMPSimpleClauseType(
+ Kind, PP.getSpelling(Tok), getLangOpts().OpenMP));
+ if (Modifier == OMPC_MOTION_MODIFIER_unknown)
+ break;
+ Data.MotionModifiers.push_back(Modifier);
+ Data.MotionModifiersLoc.push_back(Tok.getLocation());
+ ConsumeToken();
+ if (Modifier == OMPC_MOTION_MODIFIER_mapper) {
+ IsInvalidMapperModifier = parseMapperModifier(Data);
+ if (IsInvalidMapperModifier)
+ break;
}
- if (IsMapperModifier) {
- // Parse the mapper modifier.
+ // OpenMP < 5.1 doesn't permit a ',' or additional modifiers.
+ if (getLangOpts().OpenMP < 51)
+ break;
+ // OpenMP 5.1 accepts an optional ',' even if the next character is ':'.
+ // TODO: Is that intentional?
+ if (Tok.is(tok::comma))
ConsumeToken();
- IsInvalidMapperModifier = parseMapperModifier(Data);
- if (Tok.isNot(tok::colon)) {
- if (!IsInvalidMapperModifier)
- Diag(Tok, diag::warn_pragma_expected_colon) << ")";
- SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
- }
- // Consume ':'.
- if (Tok.is(tok::colon))
- ConsumeToken();
+ }
+ if (!Data.MotionModifiers.empty() && Tok.isNot(tok::colon)) {
+ if (!IsInvalidMapperModifier) {
+ if (getLangOpts().OpenMP < 51)
+ Diag(Tok, diag::warn_pragma_expected_colon) << ")";
+ else
+ Diag(Tok, diag::warn_pragma_expected_colon) << "motion modifier";
}
+ SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
}
+ // OpenMP 5.1 permits a ':' even without a preceding modifier. TODO: Is
+ // that intentional?
+ if ((!Data.MotionModifiers.empty() || getLangOpts().OpenMP >= 51) &&
+ Tok.is(tok::colon))
+ Data.ColonLoc = ConsumeToken();
} else if (Kind == OMPC_allocate ||
(Kind == OMPC_affinity && Tok.is(tok::identifier) &&
PP.getSpelling(Tok) == "iterator")) {
@@ -3625,6 +3851,6 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
Kind, Vars, Data.DepModOrTailExpr, Locs, Data.ColonLoc,
Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId,
Data.ExtraModifier, Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
- Data.IsMapTypeImplicit, Data.ExtraModifierLoc);
+ Data.IsMapTypeImplicit, Data.ExtraModifierLoc, Data.MotionModifiers,
+ Data.MotionModifiersLoc);
}
-
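For the reworked `to`/`from` parsing above, a sketch of the OpenMP 5.1
motion-modifier syntax now accepted; `m` is an illustrative mapper name:

  #pragma omp target update to(present, mapper(m) : a) from(present : b)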
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index 6402b31d00b2..f9b852826775 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -103,13 +103,15 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
+ Token PragmaName = Tok;
+ if (!PP.getTargetInfo().hasStrictFP() && !PP.getLangOpts().ExpStrictFP) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_fp_ignored)
+ << PragmaName.getIdentifierInfo()->getName();
+ return;
+ }
tok::OnOffSwitch OOS;
if (PP.LexOnOffSwitch(OOS))
return;
- if (OOS == tok::OOS_ON) {
- PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
- return;
- }
MutableArrayRef<Token> Toks(PP.getPreprocessorAllocator().Allocate<Token>(1),
1);
@@ -135,6 +137,14 @@ struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
}
};
+/// Handler for "\#pragma STDC FENV_ROUND ...".
+struct PragmaSTDC_FENV_ROUNDHandler : public PragmaHandler {
+ PragmaSTDC_FENV_ROUNDHandler() : PragmaHandler("FENV_ROUND") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &Tok) override;
+};
+
/// PragmaSTDC_UnknownHandler - "\#pragma STDC ...".
struct PragmaSTDC_UnknownHandler : public PragmaHandler {
PragmaSTDC_UnknownHandler() = default;
@@ -312,8 +322,11 @@ void Parser::initializePragmaHandlers() {
FPContractHandler = std::make_unique<PragmaFPContractHandler>();
PP.AddPragmaHandler("STDC", FPContractHandler.get());
- STDCFENVHandler = std::make_unique<PragmaSTDC_FENV_ACCESSHandler>();
- PP.AddPragmaHandler("STDC", STDCFENVHandler.get());
+ STDCFenvAccessHandler = std::make_unique<PragmaSTDC_FENV_ACCESSHandler>();
+ PP.AddPragmaHandler("STDC", STDCFenvAccessHandler.get());
+
+ STDCFenvRoundHandler = std::make_unique<PragmaSTDC_FENV_ROUNDHandler>();
+ PP.AddPragmaHandler("STDC", STDCFenvRoundHandler.get());
STDCCXLIMITHandler = std::make_unique<PragmaSTDC_CX_LIMITED_RANGEHandler>();
PP.AddPragmaHandler("STDC", STDCCXLIMITHandler.get());
@@ -485,8 +498,11 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("STDC", FPContractHandler.get());
FPContractHandler.reset();
- PP.RemovePragmaHandler("STDC", STDCFENVHandler.get());
- STDCFENVHandler.reset();
+ PP.RemovePragmaHandler("STDC", STDCFenvAccessHandler.get());
+ STDCFenvAccessHandler.reset();
+
+ PP.RemovePragmaHandler("STDC", STDCFenvRoundHandler.get());
+ STDCFenvRoundHandler.reset();
PP.RemovePragmaHandler("STDC", STDCCXLIMITHandler.get());
STDCCXLIMITHandler.reset();
@@ -697,6 +713,14 @@ void Parser::HandlePragmaFEnvAccess() {
Actions.ActOnPragmaFEnvAccess(PragmaLoc, IsEnabled);
}
+void Parser::HandlePragmaFEnvRound() {
+ assert(Tok.is(tok::annot_pragma_fenv_round));
+ auto RM = static_cast<llvm::RoundingMode>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.setRoundingMode(PragmaLoc, RM);
+}
StmtResult Parser::HandlePragmaCaptured()
{
@@ -766,7 +790,7 @@ void Parser::HandlePragmaOpenCLExtension() {
PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << Ident;
else if (Opt.isSupportedExtension(Name, getLangOpts()))
Opt.enable(Name, State == Enable);
- else if (Opt.isSupportedCore(Name, getLangOpts()))
+ else if (Opt.isSupportedCoreOrOptionalCore(Name, getLangOpts()))
PP.Diag(NameLoc, diag::warn_pragma_extension_is_core) << Ident;
else
PP.Diag(NameLoc, diag::warn_pragma_unsupported_extension) << Ident;
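As a sketch of the renamed check: enabling a feature that is optional core in
the active OpenCL version (assuming cl_khr_fp64's optional-core status from
OpenCL 1.2 on) now also draws warn_pragma_extension_is_core instead of being
treated as an ordinary extension:

  #pragma OPENCL EXTENSION cl_khr_fp64 : enable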
@@ -1163,12 +1187,79 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
<< PragmaLoopHintString(Info->PragmaName, Info->Option);
Hint.StateLoc = IdentifierLoc::create(Actions.Context, StateLoc, StateInfo);
+ } else if (OptionInfo && OptionInfo->getName() == "vectorize_width") {
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/false,
+ /*IsReinject=*/false);
+ ConsumeAnnotationToken();
+
+ SourceLocation StateLoc = Toks[0].getLocation();
+ IdentifierInfo *StateInfo = Toks[0].getIdentifierInfo();
+ StringRef IsScalableStr = StateInfo ? StateInfo->getName() : "";
+
+ // Look for vectorize_width(fixed|scalable)
+ if (IsScalableStr == "scalable" || IsScalableStr == "fixed") {
+ PP.Lex(Tok); // Identifier
+
+ if (Toks.size() > 2) {
+ Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaLoopHintString(Info->PragmaName, Info->Option);
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+
+ Hint.StateLoc =
+ IdentifierLoc::create(Actions.Context, StateLoc, StateInfo);
+
+ ConsumeToken(); // Consume the constant expression eof terminator.
+ } else {
+ // Enter constant expression including eof terminator into token stream.
+ ExprResult R = ParseConstantExpression();
+
+ if (R.isInvalid() && !Tok.is(tok::comma))
+ Diag(Toks[0].getLocation(),
+ diag::note_pragma_loop_invalid_vectorize_option);
+
+ bool Arg2Error = false;
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok); // ,
+
+ StateInfo = Tok.getIdentifierInfo();
+ IsScalableStr = StateInfo->getName();
+
+ if (IsScalableStr != "scalable" && IsScalableStr != "fixed") {
+ Diag(Tok.getLocation(),
+ diag::err_pragma_loop_invalid_vectorize_option);
+ Arg2Error = true;
+ } else
+ Hint.StateLoc =
+ IdentifierLoc::create(Actions.Context, StateLoc, StateInfo);
+
+ PP.Lex(Tok); // Identifier
+ }
+
+ // Tokens following an error in an ill-formed constant expression will
+ // remain in the token stream and must be removed.
+ if (Tok.isNot(tok::eof)) {
+ Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaLoopHintString(Info->PragmaName, Info->Option);
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+
+ ConsumeToken(); // Consume the constant expression eof terminator.
+
+ if (Arg2Error || R.isInvalid() ||
+ Actions.CheckLoopHintExpr(R.get(), Toks[0].getLocation()))
+ return false;
+
+ // Argument is a constant expression with an integer type.
+ Hint.ValueExpr = R.get();
+ }
} else {
// Enter constant expression including eof terminator into token stream.
PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/false,
/*IsReinject=*/false);
ConsumeAnnotationToken();
-
ExprResult R = ParseConstantExpression();
// Tokens following an error in an ill-formed constant expression will
@@ -1712,9 +1803,10 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
// In MSVC/gcc, #pragma pack(4) sets the alignment without affecting
// the push/pop stack.
- // In Apple gcc, #pragma pack(4) is equivalent to #pragma pack(push, 4)
- Action =
- PP.getLangOpts().ApplePragmaPack ? Sema::PSK_Push_Set : Sema::PSK_Set;
+ // In Apple gcc/XL, #pragma pack(4) is equivalent to #pragma pack(push, 4)
+ Action = (PP.getLangOpts().ApplePragmaPack || PP.getLangOpts().XLPragmaPack)
+ ? Sema::PSK_Push_Set
+ : Sema::PSK_Set;
} else if (Tok.is(tok::identifier)) {
const IdentifierInfo *II = Tok.getIdentifierInfo();
if (II->isStr("show")) {
@@ -1762,10 +1854,12 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
}
}
}
- } else if (PP.getLangOpts().ApplePragmaPack) {
+ } else if (PP.getLangOpts().ApplePragmaPack ||
+ PP.getLangOpts().XLPragmaPack) {
// In MSVC/gcc, #pragma pack() resets the alignment without affecting
// the push/pop stack.
- // In Apple gcc #pragma pack() is equivalent to #pragma pack(pop).
+ // In Apple gcc and IBM XL, #pragma pack() is equivalent to #pragma
+ // pack(pop).
Action = Sema::PSK_Pop;
}
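A sketch of the XL-compatible behavior, assuming a mode in which XLPragmaPack
is set (e.g., when targeting AIX):

  #pragma pack(2)   // treated like #pragma pack(push, 2)
  struct S { char c; short s; };
  #pragma pack()    // treated like #pragma pack(pop)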
@@ -1894,6 +1988,7 @@ void PragmaClangSectionHandler::HandlePragma(Preprocessor &PP,
// #pragma 'align' '=' {'native','natural','mac68k','power','reset'}
// #pragma 'options 'align' '=' {'native','natural','mac68k','power','reset'}
+// #pragma 'align' '(' {'native','natural','mac68k','power','reset'} ')'
static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok,
bool IsOptions) {
Token Tok;
@@ -1908,7 +2003,12 @@ static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok,
}
PP.Lex(Tok);
- if (Tok.isNot(tok::equal)) {
+ if (PP.getLangOpts().XLPragmaPack) {
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "align";
+ return;
+ }
+ } else if (Tok.isNot(tok::equal)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_align_expected_equal)
<< IsOptions;
return;
@@ -1941,6 +2041,14 @@ static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok,
return;
}
+ if (PP.getLangOpts().XLPragmaPack) {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "align";
+ return;
+ }
+ }
+
SourceLocation EndLoc = Tok.getLocation();
PP.Lex(Tok);
if (Tok.isNot(tok::eod)) {
@@ -2535,6 +2643,12 @@ void PragmaFloatControlHandler::HandlePragma(Preprocessor &PP,
Token &Tok) {
Sema::PragmaMsStackAction Action = Sema::PSK_Set;
SourceLocation FloatControlLoc = Tok.getLocation();
+ Token PragmaName = Tok;
+ if (!PP.getTargetInfo().hasStrictFP() && !PP.getLangOpts().ExpStrictFP) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_fp_ignored)
+ << PragmaName.getIdentifierInfo()->getName();
+ return;
+ }
PP.Lex(Tok);
if (Tok.isNot(tok::l_paren)) {
PP.Diag(FloatControlLoc, diag::err_expected) << tok::l_paren;
@@ -2828,11 +2942,12 @@ void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
namespace {
/// Used as the annotation value for tok::annot_pragma_fp.
struct TokFPAnnotValue {
- enum FlagKinds { Contract, Reassociate };
+ enum FlagKinds { Contract, Reassociate, Exceptions };
enum FlagValues { On, Off, Fast };
- FlagKinds FlagKind;
- FlagValues FlagValue;
+ llvm::Optional<LangOptions::FPModeKind> ContractValue;
+ llvm::Optional<LangOptions::FPModeKind> ReassociateValue;
+ llvm::Optional<LangOptions::FPExceptionModeKind> ExceptionsValue;
};
} // end anonymous namespace
@@ -2849,6 +2964,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
return;
}
+ auto *AnnotValue = new (PP.getPreprocessorAllocator()) TokFPAnnotValue;
while (Tok.is(tok::identifier)) {
IdentifierInfo *OptionInfo = Tok.getIdentifierInfo();
@@ -2857,6 +2973,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
OptionInfo->getName())
.Case("contract", TokFPAnnotValue::Contract)
.Case("reassociate", TokFPAnnotValue::Reassociate)
+ .Case("exceptions", TokFPAnnotValue::Exceptions)
.Default(None);
if (!FlagKind) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_option)
@@ -2875,25 +2992,49 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
if (Tok.isNot(tok::identifier)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
<< PP.getSpelling(Tok) << OptionInfo->getName()
- << (FlagKind == TokFPAnnotValue::Reassociate);
+ << static_cast<int>(*FlagKind);
return;
}
const IdentifierInfo *II = Tok.getIdentifierInfo();
- auto FlagValue =
- llvm::StringSwitch<llvm::Optional<TokFPAnnotValue::FlagValues>>(
- II->getName())
- .Case("on", TokFPAnnotValue::On)
- .Case("off", TokFPAnnotValue::Off)
- .Case("fast", TokFPAnnotValue::Fast)
- .Default(llvm::None);
-
- if (!FlagValue || (FlagKind == TokFPAnnotValue::Reassociate &&
- FlagValue == TokFPAnnotValue::Fast)) {
- PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
- << PP.getSpelling(Tok) << OptionInfo->getName()
- << (FlagKind == TokFPAnnotValue::Reassociate);
- return;
+ if (FlagKind == TokFPAnnotValue::Contract) {
+ AnnotValue->ContractValue =
+ llvm::StringSwitch<llvm::Optional<LangOptions::FPModeKind>>(
+ II->getName())
+ .Case("on", LangOptions::FPModeKind::FPM_On)
+ .Case("off", LangOptions::FPModeKind::FPM_Off)
+ .Case("fast", LangOptions::FPModeKind::FPM_Fast)
+ .Default(llvm::None);
+ if (!AnnotValue->ContractValue) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
+ << PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
+ return;
+ }
+ } else if (FlagKind == TokFPAnnotValue::Reassociate) {
+ AnnotValue->ReassociateValue =
+ llvm::StringSwitch<llvm::Optional<LangOptions::FPModeKind>>(
+ II->getName())
+ .Case("on", LangOptions::FPModeKind::FPM_On)
+ .Case("off", LangOptions::FPModeKind::FPM_Off)
+ .Default(llvm::None);
+ if (!AnnotValue->ReassociateValue) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
+ << PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
+ return;
+ }
+ } else if (FlagKind == TokFPAnnotValue::Exceptions) {
+ AnnotValue->ExceptionsValue =
+ llvm::StringSwitch<llvm::Optional<LangOptions::FPExceptionModeKind>>(
+ II->getName())
+ .Case("ignore", LangOptions::FPE_Ignore)
+ .Case("maytrap", LangOptions::FPE_MayTrap)
+ .Case("strict", LangOptions::FPE_Strict)
+ .Default(llvm::None);
+ if (!AnnotValue->ExceptionsValue) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
+ << PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
+ return;
+ }
}
PP.Lex(Tok);
@@ -2903,17 +3044,6 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
return;
}
PP.Lex(Tok);
-
- auto *AnnotValue = new (PP.getPreprocessorAllocator())
- TokFPAnnotValue{*FlagKind, *FlagValue};
- // Generate the fp annotation token.
- Token FPTok;
- FPTok.startToken();
- FPTok.setKind(tok::annot_pragma_fp);
- FPTok.setLocation(PragmaName.getLocation());
- FPTok.setAnnotationEndLoc(PragmaName.getLocation());
- FPTok.setAnnotationValue(reinterpret_cast<void *>(AnnotValue));
- TokenList.push_back(FPTok);
}
if (Tok.isNot(tok::eod)) {
@@ -2922,6 +3052,14 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
return;
}
+ Token FPTok;
+ FPTok.startToken();
+ FPTok.setKind(tok::annot_pragma_fp);
+ FPTok.setLocation(PragmaName.getLocation());
+ FPTok.setAnnotationEndLoc(PragmaName.getLocation());
+ FPTok.setAnnotationValue(reinterpret_cast<void *>(AnnotValue));
+ TokenList.push_back(FPTok);
+
auto TokenArray = std::make_unique<Token[]>(TokenList.size());
std::copy(TokenList.begin(), TokenList.end(), TokenArray.get());
@@ -2929,29 +3067,76 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
/*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
+void PragmaSTDC_FENV_ROUNDHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
+ Token PragmaName = Tok;
+ SmallVector<Token, 1> TokenList;
+ if (!PP.getTargetInfo().hasStrictFP() && !PP.getLangOpts().ExpStrictFP) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_fp_ignored)
+ << PragmaName.getIdentifierInfo()->getName();
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << PragmaName.getIdentifierInfo()->getName();
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ auto RM =
+ llvm::StringSwitch<llvm::RoundingMode>(II->getName())
+ .Case("FE_TOWARDZERO", llvm::RoundingMode::TowardZero)
+ .Case("FE_TONEAREST", llvm::RoundingMode::NearestTiesToEven)
+ .Case("FE_UPWARD", llvm::RoundingMode::TowardPositive)
+ .Case("FE_DOWNWARD", llvm::RoundingMode::TowardNegative)
+ .Case("FE_TONEARESTFROMZERO", llvm::RoundingMode::NearestTiesToAway)
+ .Case("FE_DYNAMIC", llvm::RoundingMode::Dynamic)
+ .Default(llvm::RoundingMode::Invalid);
+ if (RM == llvm::RoundingMode::Invalid) {
+ PP.Diag(Tok.getLocation(), diag::warn_stdc_unknown_rounding_mode);
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "STDC FENV_ROUND";
+ return;
+ }
+
+ // Until the pragma is fully implemented, issue a warning.
+ PP.Diag(Tok.getLocation(), diag::warn_stdc_fenv_round_not_supported);
+
+ MutableArrayRef<Token> Toks(PP.getPreprocessorAllocator().Allocate<Token>(1),
+ 1);
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_fenv_round);
+ Toks[0].setLocation(Tok.getLocation());
+ Toks[0].setAnnotationEndLoc(Tok.getLocation());
+ Toks[0].setAnnotationValue(
+ reinterpret_cast<void *>(static_cast<uintptr_t>(RM)));
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
+}
+
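A usage sketch for the new handler, assuming a target with strict
floating-point support. The pragma is accepted at file scope or as a leading
pragma of a compound statement, mapped to an llvm::RoundingMode, and, per the
warning above, not fully honored yet:

  double scale(double x) {
  #pragma STDC FENV_ROUND FE_TOWARDZERO
    return x * 0.1;
  }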
void Parser::HandlePragmaFP() {
assert(Tok.is(tok::annot_pragma_fp));
auto *AnnotValue =
reinterpret_cast<TokFPAnnotValue *>(Tok.getAnnotationValue());
- if (AnnotValue->FlagKind == TokFPAnnotValue::Reassociate)
- Actions.ActOnPragmaFPReassociate(
- Tok.getLocation(), AnnotValue->FlagValue == TokFPAnnotValue::On);
- else {
- LangOptions::FPModeKind FPC;
- switch (AnnotValue->FlagValue) {
- case TokFPAnnotValue::Off:
- FPC = LangOptions::FPM_Off;
- break;
- case TokFPAnnotValue::On:
- FPC = LangOptions::FPM_On;
- break;
- case TokFPAnnotValue::Fast:
- FPC = LangOptions::FPM_Fast;
- break;
- }
- Actions.ActOnPragmaFPContract(Tok.getLocation(), FPC);
- }
+ if (AnnotValue->ReassociateValue)
+ Actions.ActOnPragmaFPReassociate(Tok.getLocation(),
+ *AnnotValue->ReassociateValue ==
+ LangOptions::FPModeKind::FPM_On);
+ if (AnnotValue->ContractValue)
+ Actions.ActOnPragmaFPContract(Tok.getLocation(),
+ *AnnotValue->ContractValue);
+ if (AnnotValue->ExceptionsValue)
+ Actions.ActOnPragmaFPExceptions(Tok.getLocation(),
+ *AnnotValue->ExceptionsValue);
ConsumeAnnotationToken();
}
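Since the annotation now carries independent optional values, several options
can be combined in a single pragma. A sketch, assuming a strict-FP-capable
target for the `exceptions` option:

  float mul_add(float a, float b, float c) {
  #pragma clang fp contract(fast) exceptions(maytrap)
    return a * b + c;
  }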
diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp
index 89a6a2b829ae..26a02575010c 100644
--- a/clang/lib/Parse/ParseStmt.cpp
+++ b/clang/lib/Parse/ParseStmt.cpp
@@ -215,11 +215,10 @@ Retry:
DeclGroupPtrTy Decl;
if (GNUAttributeLoc.isValid()) {
DeclStart = GNUAttributeLoc;
- Decl = ParseDeclaration(DeclaratorContext::BlockContext, DeclEnd, Attrs,
+ Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, Attrs,
&GNUAttributeLoc);
} else {
- Decl =
- ParseDeclaration(DeclaratorContext::BlockContext, DeclEnd, Attrs);
+ Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, Attrs);
}
if (Attrs.Range.getBegin().isValid())
DeclStart = Attrs.Range.getBegin();
@@ -366,9 +365,16 @@ Retry:
case tok::annot_pragma_fenv_access:
ProhibitAttributes(Attrs);
- HandlePragmaFEnvAccess();
+ Diag(Tok, diag::err_pragma_stdc_fenv_access_scope);
+ ConsumeAnnotationToken();
return StmtEmpty();
+ case tok::annot_pragma_fenv_round:
+ ProhibitAttributes(Attrs);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope) << "STDC FENV_ROUND";
+ ConsumeAnnotationToken();
+ return StmtError();
+
case tok::annot_pragma_float_control:
ProhibitAttributes(Attrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "float_control";
@@ -943,6 +949,9 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
case tok::annot_pragma_fenv_access:
HandlePragmaFEnvAccess();
break;
+ case tok::annot_pragma_fenv_round:
+ HandlePragmaFEnvRound();
+ break;
case tok::annot_pragma_float_control:
HandlePragmaFloatControl();
break;
@@ -1024,9 +1033,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
Tok.getLocation(),
"in compound statement ('{}')");
- // Record the state of the FPFeatures, restore on leaving the
+ // Record the current FPFeatures, restore on leaving the
// compound statement.
- Sema::FPFeaturesStateRAII SaveFPContractState(Actions);
+ Sema::FPFeaturesStateRAII SaveFPFeatures(Actions);
InMessageExpressionRAIIObject InMessage(*this, false);
BalancedDelimiterTracker T(*this, tok::l_brace);
@@ -1037,6 +1046,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
// Parse any pragmas at the beginning of the compound statement.
ParseCompoundStatementLeadingPragmas();
+ Actions.ActOnAfterCompoundStatementLeadingPragmas();
StmtVector Stmts;
@@ -1108,7 +1118,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Res =
- ParseDeclaration(DeclaratorContext::BlockContext, DeclEnd, attrs);
+ ParseDeclaration(DeclaratorContext::Block, DeclEnd, attrs);
R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
} else {
// Otherwise this was a unary __extension__ marker.
@@ -1135,9 +1145,17 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
SourceLocation CloseLoc = Tok.getLocation();
// We broke out of the while loop because we found a '}' or EOF.
- if (!T.consumeClose())
+ if (!T.consumeClose()) {
+ // If this is the '})' of a statement expression, check that it's written
+ // in a sensible way.
+ if (isStmtExpr && Tok.is(tok::r_paren))
+ checkCompoundToken(CloseLoc, tok::r_brace, CompoundToken::StmtExprEnd);
+ } else {
// Recover by creating a compound statement with what we parsed so far,
- // instead of dropping everything and returning StmtError();
+ // instead of dropping everything and returning StmtError().
+ }
+
+ if (T.getCloseLocation().isValid())
CloseLoc = T.getCloseLocation();
return Actions.ActOnCompoundStmt(T.getOpenLocation(), CloseLoc,
@@ -1350,9 +1368,12 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
// Parse the condition.
StmtResult InitStmt;
Sema::ConditionResult Cond;
+ SourceLocation LParen;
+ SourceLocation RParen;
if (ParseParenExprOrCondition(&InitStmt, Cond, IfLoc,
IsConstexpr ? Sema::ConditionKind::ConstexprIf
- : Sema::ConditionKind::Boolean))
+ : Sema::ConditionKind::Boolean,
+ &LParen, &RParen))
return StmtError();
llvm::Optional<bool> ConstexprCondition;
@@ -1465,8 +1486,8 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
if (ElseStmt.isInvalid())
ElseStmt = Actions.ActOnNullStmt(ElseStmtLoc);
- return Actions.ActOnIfStmt(IfLoc, IsConstexpr, InitStmt.get(), Cond,
- ThenStmt.get(), ElseLoc, ElseStmt.get());
+ return Actions.ActOnIfStmt(IfLoc, IsConstexpr, LParen, InitStmt.get(), Cond,
+ RParen, ThenStmt.get(), ElseLoc, ElseStmt.get());
}
/// ParseSwitchStatement
@@ -1505,12 +1526,14 @@ StmtResult Parser::ParseSwitchStatement(SourceLocation *TrailingElseLoc) {
// Parse the condition.
StmtResult InitStmt;
Sema::ConditionResult Cond;
+ SourceLocation LParen;
+ SourceLocation RParen;
if (ParseParenExprOrCondition(&InitStmt, Cond, SwitchLoc,
- Sema::ConditionKind::Switch))
+ Sema::ConditionKind::Switch, &LParen, &RParen))
return StmtError();
- StmtResult Switch =
- Actions.ActOnStartOfSwitchStmt(SwitchLoc, InitStmt.get(), Cond);
+ StmtResult Switch = Actions.ActOnStartOfSwitchStmt(
+ SwitchLoc, LParen, InitStmt.get(), Cond, RParen);
if (Switch.isInvalid()) {
// Skip the switch body.
@@ -1848,7 +1871,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy DG = ParseSimpleDeclaration(
- DeclaratorContext::ForContext, DeclEnd, attrs, false,
+ DeclaratorContext::ForInit, DeclEnd, attrs, false,
MightBeForRangeStmt ? &ForRangeInfo : nullptr);
FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
if (ForRangeInfo.ParsedForRangeDecl()) {
@@ -2447,7 +2470,7 @@ StmtResult Parser::ParseCXXCatchBlock(bool FnCatch) {
if (ParseCXXTypeSpecifierSeq(DS))
return StmtError();
- Declarator ExDecl(DS, DeclaratorContext::CXXCatchContext);
+ Declarator ExDecl(DS, DeclaratorContext::CXXCatch);
ParseDeclarator(ExDecl);
ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl);
} else
diff --git a/clang/lib/Parse/ParseStmtAsm.cpp b/clang/lib/Parse/ParseStmtAsm.cpp
index 7d0818840a4f..bdf40c291cb6 100644
--- a/clang/lib/Parse/ParseStmtAsm.cpp
+++ b/clang/lib/Parse/ParseStmtAsm.cpp
@@ -581,7 +581,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
std::unique_ptr<llvm::MCSubtargetInfo> STI(
TheTarget->createMCSubtargetInfo(TT, TO.CPU, FeaturesStr));
// Target MCTargetDesc may not be linked in clang-based tools.
- if (!MAI || !MII | !MOFI || !STI) {
+ if (!MAI || !MII || !MOFI || !STI) {
Diag(AsmLoc, diag::err_msasm_unable_to_create_target)
<< "target MC unavailable";
return EmptyStmt();
diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp
index 3ef73f579123..828b9b2277ff 100644
--- a/clang/lib/Parse/ParseTemplate.cpp
+++ b/clang/lib/Parse/ParseTemplate.cpp
@@ -141,9 +141,8 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
if (TryConsumeToken(tok::kw_requires)) {
OptionalRequiresClauseConstraintER =
- Actions.CorrectDelayedTyposInExpr(
- ParseConstraintLogicalOrExpression(
- /*IsTrailingRequiresClause=*/false));
+ Actions.ActOnRequiresClause(ParseConstraintLogicalOrExpression(
+ /*IsTrailingRequiresClause=*/false));
if (!OptionalRequiresClauseConstraintER.isUsable()) {
// Skip until the semi-colon or a '}'.
SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
@@ -198,7 +197,7 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
return ParseStaticAssertDeclaration(DeclEnd);
}
- if (Context == DeclaratorContext::MemberContext) {
+ if (Context == DeclaratorContext::Member) {
// We are parsing a member template.
ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
&DiagsFromTParams);
@@ -279,7 +278,7 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Function definitions are only allowed at file scope and in C++ classes.
// The C++ inline method definition case is handled elsewhere, so we only
// need to handle the file scope definition case.
- if (Context != DeclaratorContext::FileContext) {
+ if (Context != DeclaratorContext::File) {
Diag(Tok, diag::err_function_definition_not_allowed);
SkipMalformedDecl();
return nullptr;
@@ -646,7 +645,7 @@ NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
// probably meant to write the type of a NTTP.
DeclSpec DS(getAttrFactory());
DS.SetTypeSpecError();
- Declarator D(DS, DeclaratorContext::TemplateParamContext);
+ Declarator D(DS, DeclaratorContext::TemplateParam);
D.SetIdentifier(nullptr, Tok.getLocation());
D.setInvalidType(true);
NamedDecl *ErrorParam = Actions.ActOnNonTypeTemplateParameter(
@@ -821,9 +820,9 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
SourceLocation EqualLoc;
ParsedType DefaultArg;
if (TryConsumeToken(tok::equal, EqualLoc))
- DefaultArg = ParseTypeName(/*Range=*/nullptr,
- DeclaratorContext::TemplateTypeArgContext)
- .get();
+ DefaultArg =
+ ParseTypeName(/*Range=*/nullptr, DeclaratorContext::TemplateTypeArg)
+ .get();
NamedDecl *NewDecl = Actions.ActOnTypeParameter(getCurScope(),
TypenameKeyword, EllipsisLoc,
@@ -967,7 +966,7 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
DeclSpecContext::DSC_template_param);
// Parse this as a typename.
- Declarator ParamDecl(DS, DeclaratorContext::TemplateParamContext);
+ Declarator ParamDecl(DS, DeclaratorContext::TemplateParam);
ParseDeclarator(ParamDecl);
if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
Diag(Tok.getLocation(), diag::err_expected_template_parameter);
@@ -1527,7 +1526,7 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
/*ExprContext=*/Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
if (isCXXTypeId(TypeIdAsTemplateArgument)) {
TypeResult TypeArg = ParseTypeName(
- /*Range=*/nullptr, DeclaratorContext::TemplateArgContext);
+ /*Range=*/nullptr, DeclaratorContext::TemplateArg);
return Actions.ActOnTemplateTypeArgument(TypeArg);
}
diff --git a/clang/lib/Parse/ParseTentative.cpp b/clang/lib/Parse/ParseTentative.cpp
index f026f3a1bfb2..3bf2bc455bfe 100644
--- a/clang/lib/Parse/ParseTentative.cpp
+++ b/clang/lib/Parse/ParseTentative.cpp
@@ -842,7 +842,8 @@ Parser::TPResult Parser::TryParsePtrOperatorSeq() {
while (Tok.isOneOf(tok::kw_const, tok::kw_volatile, tok::kw_restrict,
tok::kw__Nonnull, tok::kw__Nullable,
- tok::kw__Null_unspecified, tok::kw__Atomic))
+ tok::kw__Nullable_result, tok::kw__Null_unspecified,
+ tok::kw__Atomic))
ConsumeToken();
} else {
return TPResult::True;
@@ -1276,15 +1277,6 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// this is ambiguous. Typo-correct to type and expression keywords and
// to types and identifiers, in order to try to recover from errors.
TentativeParseCCC CCC(Next);
- // Tentative parsing may not be done in the right evaluation context
- // for the ultimate expression. Enter an unevaluated context to prevent
- // Sema from immediately e.g. treating this lookup as a potential ODR-use.
- // If we generate an expression annotation token and the parser actually
- // claims it as an expression, we'll transform the expression to a
- // potentially-evaluated one then.
- EnterExpressionEvaluationContext Unevaluated(
- Actions, Sema::ExpressionEvaluationContext::Unevaluated,
- Sema::ReuseLambdaContextDecl);
switch (TryAnnotateName(&CCC)) {
case ANK_Error:
return TPResult::Error;
@@ -1446,6 +1438,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw___unaligned:
case tok::kw__Nonnull:
case tok::kw__Nullable:
+ case tok::kw__Nullable_result:
case tok::kw__Null_unspecified:
case tok::kw___kindof:
return TPResult::True;
diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp
index 764d4e8e9d52..9b0f921b4269 100644
--- a/clang/lib/Parse/Parser.cpp
+++ b/clang/lib/Parse/Parser.cpp
@@ -227,6 +227,38 @@ bool Parser::expectIdentifier() {
return true;
}
+void Parser::checkCompoundToken(SourceLocation FirstTokLoc,
+ tok::TokenKind FirstTokKind, CompoundToken Op) {
+ if (FirstTokLoc.isInvalid())
+ return;
+ SourceLocation SecondTokLoc = Tok.getLocation();
+
+ // If either token is in a macro, we expect both tokens to come from the same
+ // macro expansion.
+ if ((FirstTokLoc.isMacroID() || SecondTokLoc.isMacroID()) &&
+ PP.getSourceManager().getFileID(FirstTokLoc) !=
+ PP.getSourceManager().getFileID(SecondTokLoc)) {
+ Diag(FirstTokLoc, diag::warn_compound_token_split_by_macro)
+ << (FirstTokKind == Tok.getKind()) << FirstTokKind << Tok.getKind()
+ << static_cast<int>(Op) << SourceRange(FirstTokLoc);
+ Diag(SecondTokLoc, diag::note_compound_token_split_second_token_here)
+ << (FirstTokKind == Tok.getKind()) << Tok.getKind()
+ << SourceRange(SecondTokLoc);
+ return;
+ }
+
+ // We expect the tokens to abut.
+ if (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()) {
+ SourceLocation SpaceLoc = PP.getLocForEndOfToken(FirstTokLoc);
+ if (SpaceLoc.isInvalid())
+ SpaceLoc = FirstTokLoc;
+ Diag(SpaceLoc, diag::warn_compound_token_split_by_whitespace)
+ << (FirstTokKind == Tok.getKind()) << FirstTokKind << Tok.getKind()
+ << static_cast<int>(Op) << SourceRange(FirstTokLoc, SecondTokLoc);
+ return;
+ }
+}
+
//===----------------------------------------------------------------------===//
// Error recovery.
//===----------------------------------------------------------------------===//
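
checkCompoundToken backs the new "compound token split" warnings: if the two
halves of a token pair such as the '[[' introducing an attribute come from
different macro expansions, or do not abut, the parser points at the split. A
sketch of the sort of code this is intended to flag (hypothetical; LBRACK is
an invented macro):

  #define LBRACK [
  LBRACK [noreturn]] void g();  // the two '[' tokens come from different
                                // FileIDs (macro vs. source), so warn
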
@@ -552,9 +584,10 @@ bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
// declaration. C++ doesn't have this restriction. We also don't want to
// complain if we have a precompiled header, although technically if the PCH
// is empty we should still emit the (pedantic) diagnostic.
+ // If the main file is a header, we're only pretending it's a TU; don't warn.
bool NoTopLevelDecls = ParseTopLevelDecl(Result, true);
if (NoTopLevelDecls && !Actions.getASTContext().getExternalSource() &&
- !getLangOpts().CPlusPlus)
+ !getLangOpts().CPlusPlus && !getLangOpts().IsHeaderFile)
Diag(diag::ext_empty_translation_unit);
return NoTopLevelDecls;
@@ -750,6 +783,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_pragma_fenv_access:
HandlePragmaFEnvAccess();
return nullptr;
+ case tok::annot_pragma_fenv_round:
+ HandlePragmaFEnvRound();
+ return nullptr;
case tok::annot_pragma_float_control:
HandlePragmaFloatControl();
return nullptr;
@@ -865,7 +901,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// A function definition cannot start with any of these keywords.
{
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
}
case tok::kw_static:
@@ -875,7 +911,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 0;
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
}
goto dont_know;
@@ -886,7 +922,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// Inline namespaces. Allowed as an extension even in C++03.
if (NextKind == tok::kw_namespace) {
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
}
// Parse (then ignore) 'inline' prior to a template instantiation. This is
@@ -895,7 +931,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 1;
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
}
}
goto dont_know;
@@ -909,9 +945,8 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
diag::warn_cxx98_compat_extern_template :
diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
SourceLocation DeclEnd;
- return Actions.ConvertDeclToDeclGroup(
- ParseExplicitInstantiation(DeclaratorContext::FileContext, ExternLoc,
- TemplateLoc, DeclEnd, attrs));
+ return Actions.ConvertDeclToDeclGroup(ParseExplicitInstantiation(
+ DeclaratorContext::File, ExternLoc, TemplateLoc, DeclEnd, attrs));
}
goto dont_know;
@@ -1092,11 +1127,11 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
if (getLangOpts().CPlusPlus && isTokenStringLiteral() &&
DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
- Decl *TheDecl = ParseLinkage(DS, DeclaratorContext::FileContext);
+ Decl *TheDecl = ParseLinkage(DS, DeclaratorContext::File);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- return ParseDeclGroup(DS, DeclaratorContext::FileContext);
+ return ParseDeclGroup(DS, DeclaratorContext::File);
}
Parser::DeclGroupPtrTy
@@ -1193,7 +1228,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
Scope::CompoundStmtScope);
Scope *ParentScope = getCurScope()->getParent();
- D.setFunctionDefinitionKind(FDK_Definition);
+ D.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
Decl *DP = Actions.HandleDeclarator(ParentScope, D,
TemplateParameterLists);
D.complete(DP);
@@ -1224,7 +1259,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
Scope::CompoundStmtScope);
Scope *ParentScope = getCurScope()->getParent();
- D.setFunctionDefinitionKind(FDK_Definition);
+ D.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
Decl *FuncDecl = Actions.HandleDeclarator(ParentScope, D,
MultiTemplateParamsArg());
D.complete(FuncDecl);
@@ -1405,7 +1440,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
}
// Parse the first declarator attached to this declspec.
- Declarator ParmDeclarator(DS, DeclaratorContext::KNRTypeListContext);
+ Declarator ParmDeclarator(DS, DeclaratorContext::KNRTypeList);
ParseDeclarator(ParmDeclarator);
// Handle the full declarator list.
@@ -1691,9 +1726,8 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
return ANK_Success;
}
- case Sema::NC_ContextIndependentExpr:
- Tok.setKind(Actions.isUnevaluatedContext() ? tok::annot_uneval_primary_expr
- : tok::annot_primary_expr);
+ case Sema::NC_OverloadSet:
+ Tok.setKind(tok::annot_overload_set);
setExprAnnotation(Tok, Classification.getExpression());
Tok.setAnnotationEndLoc(NameLoc);
if (SS.isNotEmpty())
diff --git a/clang/lib/Rewrite/HTMLRewrite.cpp b/clang/lib/Rewrite/HTMLRewrite.cpp
index e304fbbed729..2f5f2734aa46 100644
--- a/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -107,9 +107,9 @@ void html::HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E,
void html::EscapeText(Rewriter &R, FileID FID,
bool EscapeSpaces, bool ReplaceTabs) {
- const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
- const char* C = Buf->getBufferStart();
- const char* FileEnd = Buf->getBufferEnd();
+ llvm::MemoryBufferRef Buf = R.getSourceMgr().getBufferOrFake(FID);
+ const char* C = Buf.getBufferStart();
+ const char* FileEnd = Buf.getBufferEnd();
assert (C <= FileEnd);
@@ -226,9 +226,9 @@ static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
void html::AddLineNumbers(Rewriter& R, FileID FID) {
- const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
- const char* FileBeg = Buf->getBufferStart();
- const char* FileEnd = Buf->getBufferEnd();
+ llvm::MemoryBufferRef Buf = R.getSourceMgr().getBufferOrFake(FID);
+ const char* FileBeg = Buf.getBufferStart();
+ const char* FileEnd = Buf.getBufferEnd();
const char* C = FileBeg;
RewriteBuffer &RB = R.getEditBuffer(FID);
@@ -274,9 +274,9 @@ void html::AddLineNumbers(Rewriter& R, FileID FID) {
void html::AddHeaderFooterInternalBuiltinCSS(Rewriter &R, FileID FID,
StringRef title) {
- const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
- const char* FileStart = Buf->getBufferStart();
- const char* FileEnd = Buf->getBufferEnd();
+ llvm::MemoryBufferRef Buf = R.getSourceMgr().getBufferOrFake(FID);
+ const char* FileStart = Buf.getBufferStart();
+ const char* FileEnd = Buf.getBufferEnd();
SourceLocation StartLoc = R.getSourceMgr().getLocForStartOfFile(FID);
SourceLocation EndLoc = StartLoc.getLocWithOffset(FileEnd-FileStart);
@@ -445,7 +445,7 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
RewriteBuffer &RB = R.getEditBuffer(FID);
const SourceManager &SM = PP.getSourceManager();
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(FID);
Lexer L(FID, FromFile, SM, PP.getLangOpts());
const char *BufferStart = L.getBuffer().data();
@@ -536,7 +536,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
const SourceManager &SM = PP.getSourceManager();
std::vector<Token> TokenStream;
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(FID);
Lexer L(FID, FromFile, SM, PP.getLangOpts());
// Lex all the tokens in raw mode, to avoid entering #includes or expanding
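
Every hunk in HTMLRewrite.cpp (and the TokenRewriter.cpp hunk below) applies
the same API migration: SourceManager::getBuffer, which returned a nullable
const llvm::MemoryBuffer *, is replaced by getBufferOrFake, which returns an
llvm::MemoryBufferRef by value and falls back to a fake buffer when the
contents cannot be loaded. The calling pattern becomes:

  llvm::MemoryBufferRef Buf = SM.getBufferOrFake(FID); // no null check needed
  const char *Begin = Buf.getBufferStart();            // '.' instead of '->'
  const char *End = Buf.getBufferEnd();
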
diff --git a/clang/lib/Rewrite/Rewriter.cpp b/clang/lib/Rewrite/Rewriter.cpp
index 33718b7721ce..040e1c284253 100644
--- a/clang/lib/Rewrite/Rewriter.cpp
+++ b/clang/lib/Rewrite/Rewriter.cpp
@@ -263,8 +263,8 @@ bool Rewriter::InsertText(SourceLocation Loc, StringRef Str,
StringRef MB = SourceMgr->getBufferData(FID);
unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1;
- const SrcMgr::ContentCache *
- Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
+ const SrcMgr::ContentCache *Content =
+ &SourceMgr->getSLocEntry(FID).getFile().getContentCache();
unsigned lineOffs = Content->SourceLineCache[lineNo];
// Find the whitespace at the start of the line.
@@ -367,8 +367,8 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
unsigned startLineNo = SourceMgr->getLineNumber(FID, StartOff) - 1;
unsigned endLineNo = SourceMgr->getLineNumber(FID, EndOff) - 1;
- const SrcMgr::ContentCache *
- Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
+ const SrcMgr::ContentCache *Content =
+ &SourceMgr->getSLocEntry(FID).getFile().getContentCache();
// Find where the lines start.
unsigned parentLineOffs = Content->SourceLineCache[parentLineNo];
diff --git a/clang/lib/Rewrite/TokenRewriter.cpp b/clang/lib/Rewrite/TokenRewriter.cpp
index 538622e36b38..b1f4bd251580 100644
--- a/clang/lib/Rewrite/TokenRewriter.cpp
+++ b/clang/lib/Rewrite/TokenRewriter.cpp
@@ -28,7 +28,7 @@ TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
ScratchBuf.reset(new ScratchBuffer(SM));
// Create a lexer to lex all the tokens of the main file in raw mode.
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(FID);
Lexer RawLex(FID, FromFile, SM, LangOpts);
// Return all comments and whitespace as tokens.
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 3b7356893833..edd9742ed207 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/Analyses/CalledOnceCheck.h"
#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
@@ -36,6 +37,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
@@ -1623,6 +1625,82 @@ private:
});
}
};
+
+class CalledOnceCheckReporter : public CalledOnceCheckHandler {
+public:
+ CalledOnceCheckReporter(Sema &S) : S(S) {}
+ void handleDoubleCall(const ParmVarDecl *Parameter, const Expr *Call,
+ const Expr *PrevCall, bool IsCompletionHandler,
+ bool Poised) override {
+ auto DiagToReport = IsCompletionHandler
+ ? diag::warn_completion_handler_called_twice
+ : diag::warn_called_once_gets_called_twice;
+ S.Diag(Call->getBeginLoc(), DiagToReport) << Parameter;
+ S.Diag(PrevCall->getBeginLoc(), diag::note_called_once_gets_called_twice)
+ << Poised;
+ }
+
+ void handleNeverCalled(const ParmVarDecl *Parameter,
+ bool IsCompletionHandler) override {
+ auto DiagToReport = IsCompletionHandler
+ ? diag::warn_completion_handler_never_called
+ : diag::warn_called_once_never_called;
+ S.Diag(Parameter->getBeginLoc(), DiagToReport)
+ << Parameter << /* Captured */ false;
+ }
+
+ void handleNeverCalled(const ParmVarDecl *Parameter, const Stmt *Where,
+ NeverCalledReason Reason, bool IsCalledDirectly,
+ bool IsCompletionHandler) override {
+ auto DiagToReport = IsCompletionHandler
+ ? diag::warn_completion_handler_never_called_when
+ : diag::warn_called_once_never_called_when;
+ S.Diag(Where->getBeginLoc(), DiagToReport)
+ << Parameter << IsCalledDirectly << (unsigned)Reason;
+ }
+
+ void handleCapturedNeverCalled(const ParmVarDecl *Parameter,
+ const Decl *Where,
+ bool IsCompletionHandler) override {
+ auto DiagToReport = IsCompletionHandler
+ ? diag::warn_completion_handler_never_called
+ : diag::warn_called_once_never_called;
+ S.Diag(Where->getBeginLoc(), DiagToReport)
+ << Parameter << /* Captured */ true;
+ }
+
+private:
+ Sema &S;
+};
+
+constexpr unsigned CalledOnceWarnings[] = {
+ diag::warn_called_once_never_called,
+ diag::warn_called_once_never_called_when,
+ diag::warn_called_once_gets_called_twice};
+
+constexpr unsigned CompletionHandlerWarnings[]{
+ diag::warn_completion_handler_never_called,
+ diag::warn_completion_handler_never_called_when,
+ diag::warn_completion_handler_called_twice};
+
+bool shouldAnalyzeCalledOnceImpl(llvm::ArrayRef<unsigned> DiagIDs,
+ const DiagnosticsEngine &Diags,
+ SourceLocation At) {
+ return llvm::any_of(DiagIDs, [&Diags, At](unsigned DiagID) {
+ return !Diags.isIgnored(DiagID, At);
+ });
+}
+
+bool shouldAnalyzeCalledOnceConventions(const DiagnosticsEngine &Diags,
+ SourceLocation At) {
+ return shouldAnalyzeCalledOnceImpl(CompletionHandlerWarnings, Diags, At);
+}
+
+bool shouldAnalyzeCalledOnceParameters(const DiagnosticsEngine &Diags,
+ SourceLocation At) {
+ return shouldAnalyzeCalledOnceImpl(CalledOnceWarnings, Diags, At) ||
+ shouldAnalyzeCalledOnceConventions(Diags, At);
+}
} // anonymous namespace
namespace clang {
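
CalledOnceCheckReporter adapts the callbacks of the new CalledOnceCheckHandler
(from the CalledOnceCheck.h header added above) to Sema diagnostics, and the
shouldAnalyzeCalledOnce* helpers let the caller skip the CFG-based analysis
when every related warning is disabled. The analysis itself runs only for
Objective-C (see the LangOpts.ObjC guard in the later hunk); a sketch of what
it flags, using the called_once parameter attribute and a hypothetical
function:

  void startRequest(int ready,
                    void (^callback)(void) __attribute__((called_once))) {
    if (!ready)
      return;      // warning: 'callback' is never called on this path
    callback();
    callback();    // warning: 'callback' is called twice
  }
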
@@ -1849,8 +1927,8 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
<< *PossibleMatch);
if (Verbose && POK == POK_VarAccess) {
PartialDiagnosticAt VNote(D->getLocation(),
- S.PDiag(diag::note_guarded_by_declared_here)
- << D->getNameAsString());
+ S.PDiag(diag::note_guarded_by_declared_here)
+ << D->getDeclName());
Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote));
} else
Warnings.emplace_back(std::move(Warning), getNotes(Note));
@@ -1892,6 +1970,13 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
Warnings.emplace_back(std::move(Warning), getNotes());
}
+ void handleNegativeNotHeld(const NamedDecl *D, Name LockName,
+ SourceLocation Loc) override {
+ PartialDiagnosticAt Warning(
+ Loc, S.PDiag(diag::warn_fun_requires_negative_cap) << D << LockName);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
SourceLocation Loc) override {
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
@@ -2089,7 +2174,7 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
if (cast<DeclContext>(D)->isDependentContext())
return;
- if (Diags.hasUncompilableErrorOccurred()) {
+ if (S.hasUncompilableErrorOccurred()) {
// Flush out any possibly unreachable diagnostics.
flushDiagnostics(S, fscope);
return;
@@ -2257,6 +2342,17 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
}
}
+ // Check for violations of "called once" parameter properties.
+ if (S.getLangOpts().ObjC &&
+ shouldAnalyzeCalledOnceParameters(Diags, D->getBeginLoc())) {
+ if (AC.getCFG()) {
+ CalledOnceCheckReporter Reporter(S);
+ checkCalledOnceParameters(
+ AC, Reporter,
+ shouldAnalyzeCalledOnceConventions(Diags, D->getBeginLoc()));
+ }
+ }
+
bool FallThroughDiagFull =
!Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getBeginLoc());
bool FallThroughDiagPerFunction = !Diags.isIgnored(
diff --git a/clang/lib/Sema/CodeCompleteConsumer.cpp b/clang/lib/Sema/CodeCompleteConsumer.cpp
index f1ad8aeaacbb..678a09ba1003 100644
--- a/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -356,8 +356,7 @@ const char *CodeCompletionAllocator::CopyString(const Twine &String) {
}
StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
- const NamedDecl *ND = dyn_cast<NamedDecl>(DC);
- if (!ND)
+ if (!isa<NamedDecl>(DC))
return {};
// Check whether we've already cached the parent name.
@@ -470,8 +469,7 @@ void CodeCompletionBuilder::addParentContext(const DeclContext *DC) {
if (DC->isFunctionOrMethod())
return;
- const NamedDecl *ND = dyn_cast<NamedDecl>(DC);
- if (!ND)
+ if (!isa<NamedDecl>(DC))
return;
ParentName = getCodeCompletionTUInfo().getParentName(DC);
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index f4c30c90ad27..da42db3e8f7b 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -181,6 +181,8 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
SourceLocation LocalRangeEnd,
Declarator &TheDeclarator,
TypeResult TrailingReturnType,
+ SourceLocation
+ TrailingReturnTypeLoc,
DeclSpec *MethodQualifiers) {
assert(!(MethodQualifiers && MethodQualifiers->getTypeQualifiers() & DeclSpec::TQ_atomic) &&
"function cannot have _Atomic qualifier");
@@ -189,27 +191,29 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Kind = Function;
I.Loc = LocalRangeBegin;
I.EndLoc = LocalRangeEnd;
+ new (&I.Fun) FunctionTypeInfo;
I.Fun.hasPrototype = hasProto;
I.Fun.isVariadic = EllipsisLoc.isValid();
I.Fun.isAmbiguous = isAmbiguous;
- I.Fun.LParenLoc = LParenLoc.getRawEncoding();
- I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
- I.Fun.RParenLoc = RParenLoc.getRawEncoding();
+ I.Fun.LParenLoc = LParenLoc;
+ I.Fun.EllipsisLoc = EllipsisLoc;
+ I.Fun.RParenLoc = RParenLoc;
I.Fun.DeleteParams = false;
I.Fun.NumParams = NumParams;
I.Fun.Params = nullptr;
I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
- I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
- I.Fun.MutableLoc = MutableLoc.getRawEncoding();
+ I.Fun.RefQualifierLoc = RefQualifierLoc;
+ I.Fun.MutableLoc = MutableLoc;
I.Fun.ExceptionSpecType = ESpecType;
- I.Fun.ExceptionSpecLocBeg = ESpecRange.getBegin().getRawEncoding();
- I.Fun.ExceptionSpecLocEnd = ESpecRange.getEnd().getRawEncoding();
+ I.Fun.ExceptionSpecLocBeg = ESpecRange.getBegin();
+ I.Fun.ExceptionSpecLocEnd = ESpecRange.getEnd();
I.Fun.NumExceptionsOrDecls = 0;
I.Fun.Exceptions = nullptr;
I.Fun.NoexceptExpr = nullptr;
I.Fun.HasTrailingReturnType = TrailingReturnType.isUsable() ||
TrailingReturnType.isInvalid();
I.Fun.TrailingReturnType = TrailingReturnType.get();
+ I.Fun.TrailingReturnTypeLoc = TrailingReturnTypeLoc;
I.Fun.MethodQualifiers = nullptr;
I.Fun.QualAttrFactory = nullptr;
@@ -405,7 +409,7 @@ bool Declarator::isDeclarationOfFunction() const {
}
bool Declarator::isStaticMember() {
- assert(getContext() == DeclaratorContext::MemberContext);
+ assert(getContext() == DeclaratorContext::Member);
return getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
(getName().Kind == UnqualifiedIdKind::IK_OperatorFunctionId &&
CXXMethodDecl::isStaticOverloadedOperator(
@@ -499,12 +503,16 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TSCS S) {
llvm_unreachable("Unknown typespec!");
}
-const char *DeclSpec::getSpecifierName(TSW W) {
+const char *DeclSpec::getSpecifierName(TypeSpecifierWidth W) {
switch (W) {
- case TSW_unspecified: return "unspecified";
- case TSW_short: return "short";
- case TSW_long: return "long";
- case TSW_longlong: return "long long";
+ case TypeSpecifierWidth::Unspecified:
+ return "unspecified";
+ case TypeSpecifierWidth::Short:
+ return "short";
+ case TypeSpecifierWidth::Long:
+ return "long";
+ case TypeSpecifierWidth::LongLong:
+ return "long long";
}
llvm_unreachable("Unknown typespec!");
}
@@ -518,12 +526,14 @@ const char *DeclSpec::getSpecifierName(TSC C) {
llvm_unreachable("Unknown typespec!");
}
-
-const char *DeclSpec::getSpecifierName(TSS S) {
+const char *DeclSpec::getSpecifierName(TypeSpecifierSign S) {
switch (S) {
- case TSS_unspecified: return "unspecified";
- case TSS_signed: return "signed";
- case TSS_unsigned: return "unsigned";
+ case TypeSpecifierSign::Unspecified:
+ return "unspecified";
+ case TypeSpecifierSign::Signed:
+ return "signed";
+ case TypeSpecifierSign::Unsigned:
+ return "unsigned";
}
llvm_unreachable("Unknown typespec!");
}
@@ -579,10 +589,14 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
const char *DeclSpec::getSpecifierName(ConstexprSpecKind C) {
switch (C) {
- case CSK_unspecified: return "unspecified";
- case CSK_constexpr: return "constexpr";
- case CSK_consteval: return "consteval";
- case CSK_constinit: return "constinit";
+ case ConstexprSpecKind::Unspecified:
+ return "unspecified";
+ case ConstexprSpecKind::Constexpr:
+ return "constexpr";
+ case ConstexprSpecKind::Consteval:
+ return "consteval";
+ case ConstexprSpecKind::Constinit:
+ return "constinit";
}
llvm_unreachable("Unknown ConstexprSpecKind");
}
@@ -675,18 +689,18 @@ bool DeclSpec::SetStorageClassSpecThread(TSCS TSC, SourceLocation Loc,
/// These methods set the specified attribute of the DeclSpec, but return true
/// and ignore the request if invalid (e.g. "extern" then "auto" is
/// specified).
-bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
- const char *&PrevSpec,
- unsigned &DiagID,
+bool DeclSpec::SetTypeSpecWidth(TypeSpecifierWidth W, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
// Overwrite TSWRange.Begin only if TypeSpecWidth was unspecified, so that
// for 'long long' we will keep the source location of the first 'long'.
- if (TypeSpecWidth == TSW_unspecified)
+ if (getTypeSpecWidth() == TypeSpecifierWidth::Unspecified)
TSWRange.setBegin(Loc);
// Allow turning long -> long long.
- else if (W != TSW_longlong || TypeSpecWidth != TSW_long)
- return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID);
- TypeSpecWidth = W;
+ else if (W != TypeSpecifierWidth::LongLong ||
+ getTypeSpecWidth() != TypeSpecifierWidth::Long)
+ return BadSpecifier(W, getTypeSpecWidth(), PrevSpec, DiagID);
+ TypeSpecWidth = static_cast<unsigned>(W);
// Remember location of the last 'long'
TSWRange.setEnd(Loc);
return false;
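
This hunk shows the pattern behind most of the DeclSpec.cpp churn: the
unscoped TSW_*/TSS_*/CSK_* enums become the scoped TypeSpecifierWidth,
TypeSpecifierSign and ConstexprSpecKind, while DeclSpec keeps storing them in
plain unsigned bit-fields. Reads therefore go through an accessor and writes
through a static_cast. The storage idiom, reduced to a sketch with invented
names:

  enum class TypeSpecifierWidth { Unspecified, Short, Long, LongLong };

  struct SpecSketch {
    unsigned TypeSpecWidth : 2; // still an unsigned bit-field
    TypeSpecifierWidth getTypeSpecWidth() const {
      return static_cast<TypeSpecifierWidth>(TypeSpecWidth);
    }
    void set(TypeSpecifierWidth W) {
      TypeSpecWidth = static_cast<unsigned>(W);
    }
  };
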
@@ -702,12 +716,11 @@ bool DeclSpec::SetTypeSpecComplex(TSC C, SourceLocation Loc,
return false;
}
-bool DeclSpec::SetTypeSpecSign(TSS S, SourceLocation Loc,
- const char *&PrevSpec,
- unsigned &DiagID) {
- if (TypeSpecSign != TSS_unspecified)
- return BadSpecifier(S, (TSS)TypeSpecSign, PrevSpec, DiagID);
- TypeSpecSign = S;
+bool DeclSpec::SetTypeSpecSign(TypeSpecifierSign S, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID) {
+ if (getTypeSpecSign() != TypeSpecifierSign::Unspecified)
+ return BadSpecifier(S, getTypeSpecSign(), PrevSpec, DiagID);
+ TypeSpecSign = static_cast<unsigned>(S);
TSSLoc = Loc;
return false;
}
@@ -870,7 +883,7 @@ bool DeclSpec::SetTypePipe(bool isPipe, SourceLocation Loc,
}
if (isPipe) {
- TypeSpecPipe = TSP_pipe;
+ TypeSpecPipe = static_cast<unsigned>(TypeSpecifiersPipe::Pipe);
}
return false;
}
@@ -1014,9 +1027,6 @@ bool DeclSpec::setFunctionSpecExplicit(SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
ExplicitSpecifier ExplicitSpec,
SourceLocation CloseParenLoc) {
- assert((ExplicitSpec.getKind() == ExplicitSpecKind::ResolvedTrue ||
- ExplicitSpec.getExpr()) &&
- "invalid ExplicitSpecifier");
// 'explicit explicit' is ok, but warn as this is likely not what the user
// intended.
if (hasExplicitSpecifier()) {
@@ -1080,17 +1090,17 @@ bool DeclSpec::setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec,
bool DeclSpec::SetConstexprSpec(ConstexprSpecKind ConstexprKind,
SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID) {
- if (getConstexprSpecifier() != CSK_unspecified)
+ if (getConstexprSpecifier() != ConstexprSpecKind::Unspecified)
return BadSpecifier(ConstexprKind, getConstexprSpecifier(), PrevSpec,
DiagID);
- ConstexprSpecifier = ConstexprKind;
+ ConstexprSpecifier = static_cast<unsigned>(ConstexprKind);
ConstexprLoc = Loc;
return false;
}
void DeclSpec::SaveWrittenBuiltinSpecs() {
- writtenBS.Sign = getTypeSpecSign();
- writtenBS.Width = getTypeSpecWidth();
+ writtenBS.Sign = static_cast<int>(getTypeSpecSign());
+ writtenBS.Width = static_cast<int>(getTypeSpecWidth());
writtenBS.Type = getTypeSpecType();
// Search the list of attributes for the presence of a mode attribute.
writtenBS.ModeAttr = getAttributes().hasAttribute(ParsedAttr::AT_Mode);
@@ -1111,9 +1121,9 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// If decltype(auto) is used, no other type specifiers are permitted.
if (TypeSpecType == TST_decltype_auto &&
- (TypeSpecWidth != TSW_unspecified ||
+ (getTypeSpecWidth() != TypeSpecifierWidth::Unspecified ||
TypeSpecComplex != TSC_unspecified ||
- TypeSpecSign != TSS_unspecified ||
+ getTypeSpecSign() != TypeSpecifierSign::Unspecified ||
TypeAltiVecVector || TypeAltiVecPixel || TypeAltiVecBool ||
TypeQualifiers)) {
const unsigned NumLocs = 9;
@@ -1132,9 +1142,9 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
Hints[I] = FixItHint::CreateRemoval(ExtraLocs[I]);
}
}
- TypeSpecWidth = TSW_unspecified;
+ TypeSpecWidth = static_cast<unsigned>(TypeSpecifierWidth::Unspecified);
TypeSpecComplex = TSC_unspecified;
- TypeSpecSign = TSS_unspecified;
+ TypeSpecSign = static_cast<unsigned>(TypeSpecifierSign::Unspecified);
TypeAltiVecVector = TypeAltiVecPixel = TypeAltiVecBool = false;
TypeQualifiers = 0;
S.Diag(TSTLoc, diag::err_decltype_auto_cannot_be_combined)
@@ -1146,9 +1156,9 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
if (TypeAltiVecVector) {
if (TypeAltiVecBool) {
// Sign specifiers are not allowed with vector bool. (PIM 2.1)
- if (TypeSpecSign != TSS_unspecified) {
+ if (getTypeSpecSign() != TypeSpecifierSign::Unspecified) {
S.Diag(TSSLoc, diag::err_invalid_vector_bool_decl_spec)
- << getSpecifierName((TSS)TypeSpecSign);
+ << getSpecifierName(getTypeSpecSign());
}
// Only char/int are valid with vector bool prior to Power10.
// Power10 adds instructions that produce vector bool data
@@ -1166,13 +1176,14 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.Diag(TSTLoc, diag::err_invalid_vector_bool_int128_decl_spec);
// Only 'short' and 'long long' are valid with vector bool. (PIM 2.1)
- if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) &&
- (TypeSpecWidth != TSW_longlong))
+ if ((getTypeSpecWidth() != TypeSpecifierWidth::Unspecified) &&
+ (getTypeSpecWidth() != TypeSpecifierWidth::Short) &&
+ (getTypeSpecWidth() != TypeSpecifierWidth::LongLong))
S.Diag(TSWRange.getBegin(), diag::err_invalid_vector_bool_decl_spec)
- << getSpecifierName((TSW)TypeSpecWidth);
+ << getSpecifierName(getTypeSpecWidth());
// vector bool long long requires VSX support or ZVector.
- if ((TypeSpecWidth == TSW_longlong) &&
+ if ((getTypeSpecWidth() == TypeSpecifierWidth::LongLong) &&
(!S.Context.getTargetInfo().hasFeature("vsx")) &&
(!S.Context.getTargetInfo().hasFeature("power8-vector")) &&
!S.getLangOpts().ZVector)
@@ -1180,12 +1191,14 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Elements of vector bool are interpreted as unsigned. (PIM 2.1)
if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
- (TypeSpecType == TST_int128) || (TypeSpecWidth != TSW_unspecified))
- TypeSpecSign = TSS_unsigned;
+ (TypeSpecType == TST_int128) ||
+ (getTypeSpecWidth() != TypeSpecifierWidth::Unspecified))
+ TypeSpecSign = static_cast<unsigned>(TypeSpecifierSign::Unsigned);
} else if (TypeSpecType == TST_double) {
// vector long double and vector long long double are never allowed.
// vector double is OK for Power7 and later, and ZVector.
- if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong)
+ if (getTypeSpecWidth() == TypeSpecifierWidth::Long ||
+ getTypeSpecWidth() == TypeSpecifierWidth::LongLong)
S.Diag(TSWRange.getBegin(),
diag::err_invalid_vector_long_double_decl_spec);
else if (!S.Context.getTargetInfo().hasFeature("vsx") &&
@@ -1197,9 +1210,15 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
if (S.getLangOpts().ZVector &&
!S.Context.getTargetInfo().hasFeature("arch12"))
S.Diag(TSTLoc, diag::err_invalid_vector_float_decl_spec);
- } else if (TypeSpecWidth == TSW_long) {
+ } else if (getTypeSpecWidth() == TypeSpecifierWidth::Long) {
// vector long is unsupported for ZVector and deprecated for AltiVec.
- if (S.getLangOpts().ZVector)
+ // It has also been historically deprecated on AIX (as an alias for
+ // "vector int" in both 32-bit and 64-bit modes). It was then made
+ // unsupported in the Clang-based XL compiler since the deprecated type
+ // has a number of conflicting semantics and continuing to support it
+ // is a disservice to users.
+ if (S.getLangOpts().ZVector ||
+ S.Context.getTargetInfo().getTriple().isOSAIX())
S.Diag(TSWRange.getBegin(), diag::err_invalid_vector_long_decl_spec);
else
S.Diag(TSWRange.getBegin(),
@@ -1210,8 +1229,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
if (TypeAltiVecPixel) {
//TODO: perform validation
TypeSpecType = TST_int;
- TypeSpecSign = TSS_unsigned;
- TypeSpecWidth = TSW_short;
+ TypeSpecSign = static_cast<unsigned>(TypeSpecifierSign::Unsigned);
+ TypeSpecWidth = static_cast<unsigned>(TypeSpecifierWidth::Short);
TypeSpecOwned = false;
}
}
@@ -1220,7 +1239,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
TypeSpecType == TST_accum || TypeSpecType == TST_fract;
// signed/unsigned are only valid with int/char/wchar_t/_Accum.
- if (TypeSpecSign != TSS_unspecified) {
+ if (getTypeSpecSign() != TypeSpecifierSign::Unspecified) {
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
@@ -1229,19 +1248,21 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.Diag(TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
// signed double -> double.
- TypeSpecSign = TSS_unspecified;
+ TypeSpecSign = static_cast<unsigned>(TypeSpecifierSign::Unspecified);
}
}
// Validate the width of the type.
- switch (TypeSpecWidth) {
- case TSW_unspecified: break;
- case TSW_short: // short int
- case TSW_longlong: // long long int
+ switch (getTypeSpecWidth()) {
+ case TypeSpecifierWidth::Unspecified:
+ break;
+ case TypeSpecifierWidth::Short: // short int
+ case TypeSpecifierWidth::LongLong: // long long int
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // short -> short int, long long -> long long int.
else if (!(TypeSpecType == TST_int ||
- (IsFixedPointType && TypeSpecWidth != TSW_longlong))) {
+ (IsFixedPointType &&
+ getTypeSpecWidth() != TypeSpecifierWidth::LongLong))) {
S.Diag(TSWRange.getBegin(), diag::err_invalid_width_spec)
<< (int)TypeSpecWidth << getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecType = TST_int;
@@ -1249,7 +1270,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
TypeSpecOwned = false;
}
break;
- case TSW_long: // long double, long int
+ case TypeSpecifierWidth::Long: // long double, long int
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // long -> long int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_double &&
@@ -1279,6 +1300,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.Diag(TSTLoc, diag::ext_integer_complex);
} else if (TypeSpecType != TST_float && TypeSpecType != TST_double &&
TypeSpecType != TST_float128) {
+ // FIXME: _Float16, __fp16?
S.Diag(TSCLoc, diag::err_invalid_complex_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecComplex = TSC_unspecified;
@@ -1337,11 +1359,11 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
else if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
S.Diag(TSTLoc, diag::warn_cxx98_compat_unicode_type)
<< (TypeSpecType == TST_char16 ? "char16_t" : "char32_t");
- if (getConstexprSpecifier() == CSK_constexpr)
+ if (getConstexprSpecifier() == ConstexprSpecKind::Constexpr)
S.Diag(ConstexprLoc, diag::warn_cxx98_compat_constexpr);
- else if (getConstexprSpecifier() == CSK_consteval)
+ else if (getConstexprSpecifier() == ConstexprSpecKind::Consteval)
S.Diag(ConstexprLoc, diag::warn_cxx20_compat_consteval);
- else if (getConstexprSpecifier() == CSK_constinit)
+ else if (getConstexprSpecifier() == ConstexprSpecKind::Constinit)
S.Diag(ConstexprLoc, diag::warn_cxx20_compat_constinit);
// C++ [class.friend]p6:
// No storage-class-specifier shall appear in the decl-specifier-seq
@@ -1422,9 +1444,10 @@ void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc,
Kind = UnqualifiedIdKind::IK_OperatorFunctionId;
StartLocation = OperatorLoc;
EndLocation = OperatorLoc;
+ new (&OperatorFunctionId) struct OFI;
OperatorFunctionId.Operator = Op;
for (unsigned I = 0; I != 3; ++I) {
- OperatorFunctionId.SymbolLocations[I] = SymbolLocations[I].getRawEncoding();
+ OperatorFunctionId.SymbolLocations[I] = SymbolLocations[I];
if (SymbolLocations[I].isValid())
EndLocation = SymbolLocations[I];
diff --git a/clang/lib/Sema/JumpDiagnostics.cpp b/clang/lib/Sema/JumpDiagnostics.cpp
index b34243edea35..d33b14a79dc1 100644
--- a/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/clang/lib/Sema/JumpDiagnostics.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/BitVector.h"
using namespace clang;
@@ -580,6 +581,17 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
break;
default:
+ if (auto *ED = dyn_cast<OMPExecutableDirective>(S)) {
+ if (!ED->isStandaloneDirective()) {
+ unsigned NewParentScope = Scopes.size();
+ Scopes.emplace_back(ParentScope,
+ diag::note_omp_protected_structured_block,
+ diag::note_omp_exits_structured_block,
+ ED->getStructuredBlock()->getBeginLoc());
+ BuildScopeInformation(ED->getStructuredBlock(), NewParentScope);
+ return;
+ }
+ }
break;
}
@@ -904,6 +916,11 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
S.Diag(From->getBeginLoc(), diag::warn_jump_out_of_seh_finally);
break;
}
+ if (Scopes[I].InDiag == diag::note_omp_protected_structured_block) {
+ S.Diag(From->getBeginLoc(), diag::err_goto_into_protected_scope);
+ S.Diag(To->getBeginLoc(), diag::note_omp_exits_structured_block);
+ break;
+ }
}
}
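
The JumpDiagnostics.cpp hunks make jumping into an OpenMP structured block a
hard error: BuildScopeInformation opens a jump scope for any non-standalone
OMPExecutableDirective, and CheckJump reports err_goto_into_protected_scope
when a goto crosses into it. A sketch of code this now rejects (illustrative):

  void f() {
    goto inside;   // error: goto into the protected OpenMP structured block
  #pragma omp parallel
    {
    inside:;
    }
  }
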
diff --git a/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/clang/lib/Sema/MultiplexExternalSemaSource.cpp
index 80333e63127e..252008cda15d 100644
--- a/clang/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -172,13 +172,6 @@ Module *MultiplexExternalSemaSource::getModule(unsigned ID) {
return nullptr;
}
-bool MultiplexExternalSemaSource::DeclIsFromPCHWithObjectFile(const Decl *D) {
- for (auto *S : Sources)
- if (S->DeclIsFromPCHWithObjectFile(D))
- return true;
- return false;
-}
-
bool MultiplexExternalSemaSource::layoutRecordType(const RecordDecl *Record,
uint64_t &Size,
uint64_t &Alignment,
diff --git a/clang/lib/Sema/ScopeInfo.cpp b/clang/lib/Sema/ScopeInfo.cpp
index b2a26af9b4a5..4857346018ae 100644
--- a/clang/lib/Sema/ScopeInfo.cpp
+++ b/clang/lib/Sema/ScopeInfo.cpp
@@ -29,6 +29,7 @@ void FunctionScopeInfo::Clear() {
HasDroppedStmt = false;
HasOMPDeclareReductionCombiner = false;
HasFallthroughStmt = false;
+ UsesFPIntrin = false;
HasPotentialAvailabilityViolations = false;
ObjCShouldCallSuper = false;
ObjCIsDesignatedInit = false;
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index 2f2b52106f3d..55cb3aee6194 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -42,7 +42,7 @@
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -120,8 +120,9 @@ public:
}
IncludeStack.push_back(IncludeLoc);
- S->DiagnoseNonDefaultPragmaPack(
- Sema::PragmaPackDiagnoseKind::NonDefaultStateAtInclude, IncludeLoc);
+ S->DiagnoseNonDefaultPragmaAlignPack(
+ Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
+ IncludeLoc);
}
break;
}
@@ -130,8 +131,8 @@ public:
if (llvm::timeTraceProfilerEnabled())
llvm::timeTraceProfilerEnd();
- S->DiagnoseNonDefaultPragmaPack(
- Sema::PragmaPackDiagnoseKind::ChangedStateAtExit,
+ S->DiagnoseNonDefaultPragmaAlignPack(
+ Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
IncludeStack.pop_back_val());
}
break;
@@ -157,10 +158,12 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
MSPointerToMemberRepresentationMethod(
LangOpts.getMSPointerToMemberRepresentationMethod()),
- VtorDispStack(LangOpts.getVtorDispMode()), PackStack(0),
+ VtorDispStack(LangOpts.getVtorDispMode()),
+ AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
- CodeSegStack(nullptr), FpPragmaStack(0xffffffff), CurInitSeg(nullptr),
- VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
+ CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()),
+ CurInitSeg(nullptr), VisContext(nullptr),
+ PragmaAttributeCurrentTargetDecl(nullptr),
IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
@@ -235,7 +238,9 @@ void Sema::Initialize() {
return;
// Initialize predefined 128-bit integer types, if needed.
- if (Context.getTargetInfo().hasInt128Type()) {
+ if (Context.getTargetInfo().hasInt128Type() ||
+ (Context.getAuxTargetInfo() &&
+ Context.getAuxTargetInfo()->hasInt128Type())) {
// If either of the 128-bit integer types are unavailable to name lookup,
// define them now.
DeclarationName Int128 = &Context.Idents.get("__int128_t");
@@ -292,7 +297,7 @@ void Sema::Initialize() {
// core features.
if (getLangOpts().OpenCL) {
getOpenCLOptions().addSupport(
- Context.getTargetInfo().getSupportedOpenCLOpts());
+ Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
getOpenCLOptions().enableSupportedCore(getLangOpts());
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
@@ -368,6 +373,18 @@ void Sema::Initialize() {
#include "clang/Basic/AArch64SVEACLETypes.def"
}
+ if (Context.getTargetInfo().getTriple().isPPC64() &&
+ Context.getTargetInfo().hasFeature("paired-vector-memops")) {
+ if (Context.getTargetInfo().hasFeature("mma")) {
+#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
+ addImplicitTypedef(#Name, Context.Id##Ty);
+#include "clang/Basic/PPCTypes.def"
+ }
+#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
+ addImplicitTypedef(#Name, Context.Id##Ty);
+#include "clang/Basic/PPCTypes.def"
+ }
+
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
if (IdResolver.begin(MSVaList) == IdResolver.end())
@@ -380,6 +397,9 @@ void Sema::Initialize() {
}
Sema::~Sema() {
+ assert(InstantiatingSpecializations.empty() &&
+ "failed to clean up an InstantiatingTemplate?");
+
if (VisContext) FreeVisContext();
// Kill all the active scopes.
@@ -493,7 +513,8 @@ void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
QualType SrcType,
SourceLocation Loc) {
Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
- if (!ExprNullability || *ExprNullability != NullabilityKind::Nullable)
+ if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
+ *ExprNullability != NullabilityKind::NullableResult))
return;
Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
@@ -539,8 +560,10 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
if (VK == VK_RValue && !E->isRValue()) {
switch (Kind) {
default:
- llvm_unreachable("can't implicitly cast lvalue to rvalue with this cast "
- "kind");
+ llvm_unreachable(("can't implicitly cast lvalue to rvalue with this cast "
+ "kind: " +
+ std::string(CastExpr::getCastKindName(Kind)))
+ .c_str());
case CK_Dependent:
case CK_LValueToRValue:
case CK_ArrayToPointerDecay:
@@ -583,7 +606,8 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
}
}
- return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK);
+ return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
+ CurFPFeatureOverrides());
}
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
@@ -1016,7 +1040,7 @@ void Sema::ActOnEndOfTranslationUnit() {
}
}
- DiagnoseUnterminatedPragmaPack();
+ DiagnoseUnterminatedPragmaAlignPack();
DiagnoseUnterminatedPragmaAttribute();
// All delayed member exception specs should be checked or we end up accepting
@@ -1195,7 +1219,7 @@ void Sema::ActOnEndOfTranslationUnit() {
if (DiagD->isReferenced()) {
if (isa<CXXMethodDecl>(DiagD))
Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
- << DiagD->getDeclName();
+ << DiagD;
else {
if (FD->getStorageClass() == SC_Static &&
!FD->isInlineSpecified() &&
@@ -1203,20 +1227,20 @@ void Sema::ActOnEndOfTranslationUnit() {
SourceMgr.getExpansionLoc(FD->getLocation())))
Diag(DiagD->getLocation(),
diag::warn_unneeded_static_internal_decl)
- << DiagD->getDeclName();
+ << DiagD;
else
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
- << /*function*/0 << DiagD->getDeclName();
+ << /*function*/ 0 << DiagD;
}
} else {
if (FD->getDescribedFunctionTemplate())
Diag(DiagD->getLocation(), diag::warn_unused_template)
- << /*function*/0 << DiagD->getDeclName();
+ << /*function*/ 0 << DiagD;
else
- Diag(DiagD->getLocation(),
- isa<CXXMethodDecl>(DiagD) ? diag::warn_unused_member_function
+ Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
+ ? diag::warn_unused_member_function
: diag::warn_unused_function)
- << DiagD->getDeclName();
+ << DiagD;
}
} else {
const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
@@ -1224,20 +1248,19 @@ void Sema::ActOnEndOfTranslationUnit() {
DiagD = cast<VarDecl>(*I);
if (DiagD->isReferenced()) {
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
- << /*variable*/1 << DiagD->getDeclName();
+ << /*variable*/ 1 << DiagD;
} else if (DiagD->getType().isConstQualified()) {
const SourceManager &SM = SourceMgr;
if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
!PP.getLangOpts().IsHeaderFile)
Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
- << DiagD->getDeclName();
+ << DiagD;
} else {
if (DiagD->getDescribedVarTemplate())
Diag(DiagD->getLocation(), diag::warn_unused_template)
- << /*variable*/1 << DiagD->getDeclName();
+ << /*variable*/ 1 << DiagD;
else
- Diag(DiagD->getLocation(), diag::warn_unused_variable)
- << DiagD->getDeclName();
+ Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD;
}
}
}
@@ -1433,11 +1456,24 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
}
Sema::SemaDiagnosticBuilder
-Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
- SemaDiagnosticBuilder Builder(Diag(Loc, PD.getDiagID()));
- PD.Emit(Builder);
+Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
+ return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
+}
- return Builder;
+bool Sema::hasUncompilableErrorOccurred() const {
+ if (getDiagnostics().hasUncompilableErrorOccurred())
+ return true;
+ auto *FD = dyn_cast<FunctionDecl>(CurContext);
+ if (!FD)
+ return false;
+ auto Loc = DeviceDeferredDiags.find(FD);
+ if (Loc == DeviceDeferredDiags.end())
+ return false;
+ for (auto PDAt : Loc->second) {
+ if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
+ return true;
+ }
+ return false;
}
// Print notes showing how we can reach FD starting from an a priori
@@ -1490,7 +1526,7 @@ public:
typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
// Whether the function is already in the current use-path.
- llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
+ llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
// The current use-path.
llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
@@ -1499,7 +1535,7 @@ public:
// case not in OpenMP device context. Done[1] is for the case in OpenMP
// device context. We need two sets because diagnostics emission may be
// different depending on whether it is in OpenMP device context.
- llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
+ llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
// Emission state of the root node of the current use graph.
bool ShouldEmitRootNode;
@@ -1549,7 +1585,8 @@ public:
S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
return;
// Finalize analysis of OpenMP-specific constructs.
- if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1)
+ if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
+ (ShouldEmitRootNode || InOMPDeviceContext))
S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
if (Caller)
S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
@@ -1649,9 +1686,9 @@ void Sema::emitDeferredDiags() {
// until we discover that the function is known-emitted, at which point we take
// it out of this map and emit the diagnostic.
-Sema::DeviceDiagBuilder::DeviceDiagBuilder(Kind K, SourceLocation Loc,
- unsigned DiagID, FunctionDecl *Fn,
- Sema &S)
+Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
+ unsigned DiagID,
+ FunctionDecl *Fn, Sema &S)
: S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
switch (K) {
@@ -1659,7 +1696,8 @@ Sema::DeviceDiagBuilder::DeviceDiagBuilder(Kind K, SourceLocation Loc,
break;
case K_Immediate:
case K_ImmediateWithCallStack:
- ImmediateDiag.emplace(S.Diag(Loc, DiagID));
+ ImmediateDiag.emplace(
+ ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
break;
case K_Deferred:
assert(Fn && "Must have a function to attach the deferred diag to.");
@@ -1670,7 +1708,7 @@ Sema::DeviceDiagBuilder::DeviceDiagBuilder(Kind K, SourceLocation Loc,
}
}
-Sema::DeviceDiagBuilder::DeviceDiagBuilder(DeviceDiagBuilder &&D)
+Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
: S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
PartialDiagId(D.PartialDiagId) {
@@ -1680,7 +1718,7 @@ Sema::DeviceDiagBuilder::DeviceDiagBuilder(DeviceDiagBuilder &&D)
D.PartialDiagId.reset();
}
-Sema::DeviceDiagBuilder::~DeviceDiagBuilder() {
+Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
if (ImmediateDiag) {
// Emit our diagnostic and, if it was a warning or error, output a callstack
// if Fn isn't a priori known-emitted.
@@ -1695,7 +1733,8 @@ Sema::DeviceDiagBuilder::~DeviceDiagBuilder() {
}
}
-Sema::DeviceDiagBuilder Sema::targetDiag(SourceLocation Loc, unsigned DiagID) {
+Sema::SemaDiagnosticBuilder Sema::targetDiag(SourceLocation Loc,
+ unsigned DiagID) {
if (LangOpts.OpenMP)
return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID)
: diagIfOpenMPHostCode(Loc, DiagID);
@@ -1706,8 +1745,32 @@ Sema::DeviceDiagBuilder Sema::targetDiag(SourceLocation Loc, unsigned DiagID) {
if (getLangOpts().SYCLIsDevice)
return SYCLDiagIfDeviceCode(Loc, DiagID);
- return DeviceDiagBuilder(DeviceDiagBuilder::K_Immediate, Loc, DiagID,
- getCurFunctionDecl(), *this);
+ return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
+ getCurFunctionDecl(), *this);
+}
+
+Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
+ bool DeferHint) {
+ bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
+ bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
+ DiagnosticIDs::isDeferrable(DiagID) &&
+ (DeferHint || !IsError);
+ auto SetIsLastErrorImmediate = [&](bool Flag) {
+ if (IsError)
+ IsLastErrorImmediate = Flag;
+ };
+ if (!ShouldDefer) {
+ SetIsLastErrorImmediate(true);
+ return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
+ DiagID, getCurFunctionDecl(), *this);
+ }
+
+ SemaDiagnosticBuilder DB =
+ getLangOpts().CUDAIsDevice
+ ? CUDADiagIfDeviceCode(Loc, DiagID)
+ : CUDADiagIfHostCode(Loc, DiagID);
+ SetIsLastErrorImmediate(DB.isImmediate());
+ return DB;
}
void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
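
This new Sema::Diag overload is the entry point for deferred device
diagnostics: under CUDA with -fgpu-defer-diag, a deferrable diagnostic is
routed through CUDADiagIfDeviceCode/CUDADiagIfHostCode and only surfaces if
the containing function is actually emitted for that side of the compilation.
A sketch of the intended effect (hypothetical CUDA code):

  __host__ __device__ void maybeBad() {
    throw 0; // invalid on the device, but with deferral it is only reported
             // if maybeBad() is really codegen'd for the device
  }
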
@@ -1732,6 +1795,15 @@ void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
if (Ty->isDependentType())
return;
+ if (Ty->isExtIntType()) {
+ if (!Context.getTargetInfo().hasExtIntType()) {
+ targetDiag(Loc, diag::err_device_unsupported_type)
+ << D << false /*show bit size*/ << 0 /*bitsize*/
+ << Ty << Context.getTargetInfo().getTriple().str();
+ }
+ return;
+ }
+
if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
((Ty->isFloat128Type() ||
(Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
@@ -1739,7 +1811,8 @@ void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
(Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
!Context.getTargetInfo().hasInt128Type())) {
targetDiag(Loc, diag::err_device_unsupported_type)
- << D << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
+ << D << true /*show bit size*/
+ << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
<< Context.getTargetInfo().getTriple().str();
targetDiag(D->getLocation(), diag::note_defined_here) << D;
}
@@ -1768,7 +1841,7 @@ bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
loc = getSourceManager().getExpansionLoc(loc);
// If that's written with the name, stop here.
- SmallVector<char, 16> buffer;
+ SmallString<16> buffer;
if (getPreprocessor().getSpelling(loc, buffer) == name) {
locref = loc;
return true;
diff --git a/clang/lib/Sema/SemaAccess.cpp b/clang/lib/Sema/SemaAccess.cpp
index bd15b81cbed0..be30445d143c 100644
--- a/clang/lib/Sema/SemaAccess.cpp
+++ b/clang/lib/Sema/SemaAccess.cpp
@@ -1476,7 +1476,8 @@ void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *D) {
} else if (FunctionDecl *FN = dyn_cast<FunctionDecl>(D)) {
DC = FN;
} else if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) {
- DC = cast<DeclContext>(TD->getTemplatedDecl());
+ if (isa<DeclContext>(TD->getTemplatedDecl()))
+ DC = cast<DeclContext>(TD->getTemplatedDecl());
}
EffectiveContext EC(DC);
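
The added isa<> guard matters because not every templated declaration is a
DeclContext; a variable template's pattern, for example, is a VarDecl:

  template <typename T>
  constexpr T zero = T(); // TemplateDecl whose templated decl (a VarDecl)
                          // is not a DeclContext
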
diff --git a/clang/lib/Sema/SemaAttr.cpp b/clang/lib/Sema/SemaAttr.cpp
index b354e810974c..6e441a0ded0d 100644
--- a/clang/lib/Sema/SemaAttr.cpp
+++ b/clang/lib/Sema/SemaAttr.cpp
@@ -48,28 +48,38 @@ Sema::PragmaStackSentinelRAII::~PragmaStackSentinelRAII() {
}
void Sema::AddAlignmentAttributesForRecord(RecordDecl *RD) {
- // If there is no pack value, we don't need any attributes.
- if (!PackStack.CurrentValue)
+ AlignPackInfo InfoVal = AlignPackStack.CurrentValue;
+ AlignPackInfo::Mode M = InfoVal.getAlignMode();
+ bool IsPackSet = InfoVal.IsPackSet();
+ bool IsXLPragma = getLangOpts().XLPragmaPack;
+
+ // If we are not under mac68k/natural alignment mode and also there is no pack
+ // value, we don't need any attributes.
+ if (!IsPackSet && M != AlignPackInfo::Mac68k && M != AlignPackInfo::Natural)
return;
- // Otherwise, check to see if we need a max field alignment attribute.
- if (unsigned Alignment = PackStack.CurrentValue) {
- if (Alignment == Sema::kMac68kAlignmentSentinel)
- RD->addAttr(AlignMac68kAttr::CreateImplicit(Context));
- else
- RD->addAttr(MaxFieldAlignmentAttr::CreateImplicit(Context,
- Alignment * 8));
+ if (M == AlignPackInfo::Mac68k && (IsXLPragma || InfoVal.IsAlignAttr())) {
+ RD->addAttr(AlignMac68kAttr::CreateImplicit(Context));
+ } else if (IsPackSet) {
+ // Check to see if we need a max field alignment attribute.
+ RD->addAttr(MaxFieldAlignmentAttr::CreateImplicit(
+ Context, InfoVal.getPackNumber() * 8));
}
- if (PackIncludeStack.empty())
+
+ if (IsXLPragma && M == AlignPackInfo::Natural)
+ RD->addAttr(AlignNaturalAttr::CreateImplicit(Context));
+
+ if (AlignPackIncludeStack.empty())
return;
- // The #pragma pack affected a record in an included file, so Clang should
- // warn when that pragma was written in a file that included the included
- // file.
- for (auto &PackedInclude : llvm::reverse(PackIncludeStack)) {
- if (PackedInclude.CurrentPragmaLocation != PackStack.CurrentPragmaLocation)
+ // The #pragma align/pack affected a record in an included file, so Clang
+ // should warn when that pragma was written in a file that included the
+ // included file.
+ for (auto &AlignPackedInclude : llvm::reverse(AlignPackIncludeStack)) {
+ if (AlignPackedInclude.CurrentPragmaLocation !=
+ AlignPackStack.CurrentPragmaLocation)
break;
- if (PackedInclude.HasNonDefaultValue)
- PackedInclude.ShouldWarnOnInclude = true;
+ if (AlignPackedInclude.HasNonDefaultValue)
+ AlignPackedInclude.ShouldWarnOnInclude = true;
}
}
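
AddAlignmentAttributesForRecord now works from an AlignPackInfo mode rather
than a bare pack value, so mac68k and the new XL "natural" mode can be
represented alongside #pragma pack numbers. A sketch of what the natural mode
affects, assuming an AIX/XL-style target (illustrative; the precise layout is
target-defined):

  #pragma options align=natural
  struct S { int i; double d; }; // record gets AlignNaturalAttr; 'd' keeps
  #pragma options align=reset    // its natural 8-byte alignment
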
@@ -205,23 +215,27 @@ void Sema::inferGslOwnerPointerAttribute(CXXRecordDecl *Record) {
void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc) {
PragmaMsStackAction Action = Sema::PSK_Reset;
- unsigned Alignment = 0;
+ AlignPackInfo::Mode ModeVal = AlignPackInfo::Native;
+
switch (Kind) {
- // For all targets we support native and natural are the same.
+ // For most of the platforms we support, native and natural are the same.
+ // With XL, native is the same as power, natural means something else.
//
// FIXME: This is not true on Darwin/PPC.
case POAK_Native:
case POAK_Power:
+ Action = Sema::PSK_Push_Set;
+ break;
case POAK_Natural:
Action = Sema::PSK_Push_Set;
- Alignment = 0;
+ ModeVal = AlignPackInfo::Natural;
break;
// Note that '#pragma options align=packed' is not equivalent to attribute
// packed, it has a different precedence relative to attribute aligned.
case POAK_Packed:
Action = Sema::PSK_Push_Set;
- Alignment = 1;
+ ModeVal = AlignPackInfo::Packed;
break;
case POAK_Mac68k:
@@ -231,15 +245,15 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
return;
}
Action = Sema::PSK_Push_Set;
- Alignment = Sema::kMac68kAlignmentSentinel;
+ ModeVal = AlignPackInfo::Mac68k;
break;
-
case POAK_Reset:
// Reset just pops the top of the stack, or resets the current alignment to
// default.
Action = Sema::PSK_Pop;
- if (PackStack.Stack.empty()) {
- if (PackStack.CurrentValue) {
+ if (AlignPackStack.Stack.empty()) {
+ if (AlignPackStack.CurrentValue.getAlignMode() != AlignPackInfo::Native ||
+ AlignPackStack.CurrentValue.IsPackAttr()) {
Action = Sema::PSK_Reset;
} else {
Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed)
@@ -250,7 +264,9 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
break;
}
- PackStack.Act(PragmaLoc, Action, StringRef(), Alignment);
+ AlignPackInfo Info(ModeVal, getLangOpts().XLPragmaPack);
+
+ AlignPackStack.Act(PragmaLoc, Action, StringRef(), Info);
}
void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action,
@@ -295,98 +311,129 @@ void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionA
void Sema::ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *alignment) {
+ bool IsXLPragma = getLangOpts().XLPragmaPack;
+ // XL pragma pack does not support identifier syntax.
+ if (IsXLPragma && !SlotLabel.empty()) {
+ Diag(PragmaLoc, diag::err_pragma_pack_identifer_not_supported);
+ return;
+ }
+
+ const AlignPackInfo CurVal = AlignPackStack.CurrentValue;
Expr *Alignment = static_cast<Expr *>(alignment);
// If specified then alignment must be a "small" power of two.
unsigned AlignmentVal = 0;
+ AlignPackInfo::Mode ModeVal = CurVal.getAlignMode();
+
if (Alignment) {
- llvm::APSInt Val;
+ Optional<llvm::APSInt> Val;
+ Val = Alignment->getIntegerConstantExpr(Context);
// pack(0) is like pack(), which just works out since that is what
// we use 0 for in PackAttr.
- if (Alignment->isTypeDependent() ||
- Alignment->isValueDependent() ||
- !Alignment->isIntegerConstantExpr(Val, Context) ||
- !(Val == 0 || Val.isPowerOf2()) ||
- Val.getZExtValue() > 16) {
+ if (Alignment->isTypeDependent() || Alignment->isValueDependent() || !Val ||
+ !(*Val == 0 || Val->isPowerOf2()) || Val->getZExtValue() > 16) {
Diag(PragmaLoc, diag::warn_pragma_pack_invalid_alignment);
return; // Ignore
}
- AlignmentVal = (unsigned) Val.getZExtValue();
+ if (IsXLPragma && *Val == 0) {
+ // pack(0) does not work out with XL.
+ Diag(PragmaLoc, diag::err_pragma_pack_invalid_alignment);
+ return; // Ignore
+ }
+
+ AlignmentVal = (unsigned)Val->getZExtValue();
}
+
if (Action == Sema::PSK_Show) {
// Show the current alignment, making sure to show the right value
// for the default.
// FIXME: This should come from the target.
- AlignmentVal = PackStack.CurrentValue;
- if (AlignmentVal == 0)
- AlignmentVal = 8;
- if (AlignmentVal == Sema::kMac68kAlignmentSentinel)
+ AlignmentVal = CurVal.IsPackSet() ? CurVal.getPackNumber() : 8;
+ if (ModeVal == AlignPackInfo::Mac68k &&
+ (IsXLPragma || CurVal.IsAlignAttr()))
Diag(PragmaLoc, diag::warn_pragma_pack_show) << "mac68k";
else
Diag(PragmaLoc, diag::warn_pragma_pack_show) << AlignmentVal;
}
+
// MSDN, C/C++ Preprocessor Reference > Pragma Directives > pack:
// "#pragma pack(pop, identifier, n) is undefined"
if (Action & Sema::PSK_Pop) {
if (Alignment && !SlotLabel.empty())
Diag(PragmaLoc, diag::warn_pragma_pack_pop_identifier_and_alignment);
- if (PackStack.Stack.empty())
+ if (AlignPackStack.Stack.empty()) {
+ assert(CurVal.getAlignMode() == AlignPackInfo::Native &&
+ "Empty pack stack can only be at Native alignment mode.");
Diag(PragmaLoc, diag::warn_pragma_pop_failed) << "pack" << "stack empty";
+ }
}
- PackStack.Act(PragmaLoc, Action, SlotLabel, AlignmentVal);
+ AlignPackInfo Info(ModeVal, AlignmentVal, IsXLPragma);
+
+ AlignPackStack.Act(PragmaLoc, Action, SlotLabel, Info);
}
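A usage sketch of the checks above; the langopt is XLPragmaPack, and the driver flag spelling given here is an assumption:

    // Sketch: '#pragma pack' in the MS model vs. the XL model.
    #pragma pack(push, 2)          // push the current value, then set it to 2
    struct S { char c; int i; };   // max field alignment 2 => sizeof(S) == 6
    #pragma pack(pop)              // restore the previous value
    // Under the XL model (assumed flag: -fxl-pragma-pack):
    // #pragma pack(push, r1, 4)   // error: identifier syntax not supported
    // #pragma pack(0)             // error: XL does not accept pack(0)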
-void Sema::DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
- SourceLocation IncludeLoc) {
- if (Kind == PragmaPackDiagnoseKind::NonDefaultStateAtInclude) {
- SourceLocation PrevLocation = PackStack.CurrentPragmaLocation;
+void Sema::DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
+ SourceLocation IncludeLoc) {
+ if (Kind == PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude) {
+ SourceLocation PrevLocation = AlignPackStack.CurrentPragmaLocation;
// Warn about non-default alignment at #includes (without redundant
// warnings for the same directive in nested includes).
// The warning is delayed until the end of the file to avoid warnings
// for files that don't have any records that are affected by the modified
// alignment.
bool HasNonDefaultValue =
- PackStack.hasValue() &&
- (PackIncludeStack.empty() ||
- PackIncludeStack.back().CurrentPragmaLocation != PrevLocation);
- PackIncludeStack.push_back(
- {PackStack.CurrentValue,
- PackStack.hasValue() ? PrevLocation : SourceLocation(),
+ AlignPackStack.hasValue() &&
+ (AlignPackIncludeStack.empty() ||
+ AlignPackIncludeStack.back().CurrentPragmaLocation != PrevLocation);
+ AlignPackIncludeStack.push_back(
+ {AlignPackStack.CurrentValue,
+ AlignPackStack.hasValue() ? PrevLocation : SourceLocation(),
HasNonDefaultValue, /*ShouldWarnOnInclude*/ false});
return;
}
- assert(Kind == PragmaPackDiagnoseKind::ChangedStateAtExit && "invalid kind");
- PackIncludeState PrevPackState = PackIncludeStack.pop_back_val();
- if (PrevPackState.ShouldWarnOnInclude) {
+ assert(Kind == PragmaAlignPackDiagnoseKind::ChangedStateAtExit &&
+ "invalid kind");
+ AlignPackIncludeState PrevAlignPackState =
+ AlignPackIncludeStack.pop_back_val();
+ // FIXME: AlignPackStack may contain both #pragma align and #pragma pack
+ // information, so the diagnostics below might not be accurate if we have
+ // mixed pragmas.
+ if (PrevAlignPackState.ShouldWarnOnInclude) {
// Emit the delayed non-default alignment at #include warning.
Diag(IncludeLoc, diag::warn_pragma_pack_non_default_at_include);
- Diag(PrevPackState.CurrentPragmaLocation, diag::note_pragma_pack_here);
+ Diag(PrevAlignPackState.CurrentPragmaLocation, diag::note_pragma_pack_here);
}
// Warn about modified alignment after #includes.
- if (PrevPackState.CurrentValue != PackStack.CurrentValue) {
+ if (PrevAlignPackState.CurrentValue != AlignPackStack.CurrentValue) {
Diag(IncludeLoc, diag::warn_pragma_pack_modified_after_include);
- Diag(PackStack.CurrentPragmaLocation, diag::note_pragma_pack_here);
+ Diag(AlignPackStack.CurrentPragmaLocation, diag::note_pragma_pack_here);
}
}
-void Sema::DiagnoseUnterminatedPragmaPack() {
- if (PackStack.Stack.empty())
+void Sema::DiagnoseUnterminatedPragmaAlignPack() {
+ if (AlignPackStack.Stack.empty())
return;
bool IsInnermost = true;
- for (const auto &StackSlot : llvm::reverse(PackStack.Stack)) {
+
+ // FIXME: AlignPackStack may contain both #pragma align and #pragma pack
+ // information, so the diagnostics below might not be accurate if we have
+ // mixed pragmas.
+ for (const auto &StackSlot : llvm::reverse(AlignPackStack.Stack)) {
Diag(StackSlot.PragmaPushLocation, diag::warn_pragma_pack_no_pop_eof);
// The user might have already reset the alignment, so suggest replacing
// the reset with a pop.
- if (IsInnermost && PackStack.CurrentValue == PackStack.DefaultValue) {
- DiagnosticBuilder DB = Diag(PackStack.CurrentPragmaLocation,
- diag::note_pragma_pack_pop_instead_reset);
- SourceLocation FixItLoc = Lexer::findLocationAfterToken(
- PackStack.CurrentPragmaLocation, tok::l_paren, SourceMgr, LangOpts,
- /*SkipTrailing=*/false);
+ if (IsInnermost &&
+ AlignPackStack.CurrentValue == AlignPackStack.DefaultValue) {
+ auto DB = Diag(AlignPackStack.CurrentPragmaLocation,
+ diag::note_pragma_pack_pop_instead_reset);
+ SourceLocation FixItLoc =
+ Lexer::findLocationAfterToken(AlignPackStack.CurrentPragmaLocation,
+ tok::l_paren, SourceMgr, LangOpts,
+ /*SkipTrailing=*/false);
if (FixItLoc.isValid())
DB << FixItHint::CreateInsertion(FixItLoc, "pop");
}
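The fix-it above targets the pattern sketched here, where a reset is used in place of the matching pop:

    // Sketch: a push left open at end of file triggers
    // warn_pragma_pack_no_pop_eof; the note suggests pack(pop)
    // in place of the trailing reset.
    #pragma pack(push, 1)
    struct Wire { char tag; int payload; };
    #pragma pack()                 // resets, but does not pop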
@@ -417,10 +464,7 @@ void Sema::ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
PragmaMsStackAction Action,
PragmaFloatControlKind Value) {
- unsigned NewValue = FpPragmaStack.hasValue()
- ? FpPragmaStack.CurrentValue
- : CurFPFeatureOverrides().getAsOpaqueInt();
- FPOptionsOverride NewFPFeatures(NewValue);
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
if ((Action == PSK_Push_Set || Action == PSK_Push || Action == PSK_Pop) &&
!(CurContext->isTranslationUnit()) && !CurContext->isNamespace()) {
// Push and pop can only occur at file or namespace scope.
@@ -432,8 +476,7 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
llvm_unreachable("invalid pragma float_control kind");
case PFC_Precise:
NewFPFeatures.setFPPreciseEnabled(true);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_NoPrecise:
if (CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Strict)
@@ -442,25 +485,21 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
Diag(Loc, diag::err_pragma_fc_noprecise_requires_nofenv);
else
NewFPFeatures.setFPPreciseEnabled(false);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_Except:
if (!isPreciseFPEnabled())
Diag(Loc, diag::err_pragma_fc_except_requires_precise);
else
NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_NoExcept:
NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Ignore);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_Push:
- FpPragmaStack.Act(Loc, Sema::PSK_Push_Set, StringRef(),
- NewFPFeatures.getAsOpaqueInt());
+ FpPragmaStack.Act(Loc, Sema::PSK_Push_Set, StringRef(), NewFPFeatures);
break;
case PFC_Pop:
if (FpPragmaStack.Stack.empty()) {
@@ -468,14 +507,11 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
<< "stack empty";
return;
}
- FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures.getAsOpaqueInt());
- NewValue = FpPragmaStack.CurrentValue;
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
+ NewFPFeatures = FpPragmaStack.CurrentValue;
break;
}
- FPOptionsOverride NewOverrides;
- if (NewValue != FpPragmaStack.DefaultValue)
- NewOverrides.getFromOpaqueInt(NewValue);
- CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
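For reference, a minimal sketch of the pragma handled above:

    // Sketch: '#pragma float_control' push/set/pop.
    #pragma float_control(push)
    #pragma float_control(precise, off)  // relaxed: a*b + c may contract to FMA
    float mad(float a, float b, float c) { return a * b + c; }
    #pragma float_control(pop)           // pop on an empty stack would warn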
void Sema::ActOnPragmaMSPointersToMembers(
@@ -494,9 +530,70 @@ void Sema::ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
VtorDispStack.Act(PragmaLoc, Action, StringRef(), Mode);
}
-bool Sema::UnifySection(StringRef SectionName,
- int SectionFlags,
- DeclaratorDecl *Decl) {
+template <>
+void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
+ PragmaMsStackAction Action,
+ llvm::StringRef StackSlotLabel,
+ AlignPackInfo Value) {
+ if (Action == PSK_Reset) {
+ CurrentValue = DefaultValue;
+ CurrentPragmaLocation = PragmaLocation;
+ return;
+ }
+ if (Action & PSK_Push)
+ Stack.emplace_back(Slot(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
+ PragmaLocation));
+ else if (Action & PSK_Pop) {
+ if (!StackSlotLabel.empty()) {
+ // If we've got a label, try to find it and jump there.
+ auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
+ return x.StackSlotLabel == StackSlotLabel;
+ });
+ // We found the label, so pop from there.
+ if (I != Stack.rend()) {
+ CurrentValue = I->Value;
+ CurrentPragmaLocation = I->PragmaLocation;
+ Stack.erase(std::prev(I.base()), Stack.end());
+ }
+ } else if (Value.IsXLStack() && Value.IsAlignAttr() &&
+ CurrentValue.IsPackAttr()) {
+ // XL '#pragma align(reset)' pops the stack until the innermost
+ // '#pragma align' currently in effect has been popped.
+ auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
+ return x.Value.IsAlignAttr();
+ });
+ // If we found a pragma align, pop from there.
+ if (I != Stack.rend()) {
+ Stack.erase(std::prev(I.base()), Stack.end());
+ if (Stack.empty()) {
+ CurrentValue = DefaultValue;
+ CurrentPragmaLocation = PragmaLocation;
+ } else {
+ CurrentValue = Stack.back().Value;
+ CurrentPragmaLocation = Stack.back().PragmaLocation;
+ Stack.pop_back();
+ }
+ }
+ } else if (!Stack.empty()) {
+ // XL '#pragma align' sets the baseline, and `#pragma pack` cannot pop
+ // over the baseline.
+ if (Value.IsXLStack() && Value.IsPackAttr() && CurrentValue.IsAlignAttr())
+ return;
+
+ // We don't have a label, just pop the last entry.
+ CurrentValue = Stack.back().Value;
+ CurrentPragmaLocation = Stack.back().PragmaLocation;
+ Stack.pop_back();
+ }
+ }
+ if (Action & PSK_Set) {
+ CurrentValue = Value;
+ CurrentPragmaLocation = PragmaLocation;
+ }
+}
+
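A sketch of the XL-specific stack behavior this specialization implements (directive spellings follow the comments above; the exact interleaving is an assumption):

    #pragma align(natural)   // XL: establishes the baseline on the stack
    #pragma pack(2)          // pushes a pack entry above the align baseline
    #pragma pack(pop)        // pops the pack entry, but cannot pop past align
    #pragma align(reset)     // pops up to and including the innermost align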
+bool Sema::UnifySection(StringRef SectionName, int SectionFlags,
+ NamedDecl *Decl) {
SourceLocation PragmaLocation;
if (auto A = Decl->getAttr<SectionAttr>())
if (A->isImplicit())
@@ -968,10 +1065,7 @@ void Sema::ActOnPragmaVisibility(const IdentifierInfo* VisType,
void Sema::ActOnPragmaFPContract(SourceLocation Loc,
LangOptions::FPModeKind FPC) {
- unsigned NewValue = FpPragmaStack.hasValue()
- ? FpPragmaStack.CurrentValue
- : CurFPFeatureOverrides().getAsOpaqueInt();
- FPOptionsOverride NewFPFeatures(NewValue);
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
switch (FPC) {
case LangOptions::FPM_On:
NewFPFeatures.setAllowFPContractWithinStatement();
@@ -982,54 +1076,45 @@ void Sema::ActOnPragmaFPContract(SourceLocation Loc,
case LangOptions::FPM_Off:
NewFPFeatures.setDisallowFPContract();
break;
+ case LangOptions::FPM_FastHonorPragmas:
+ llvm_unreachable("Should not happen");
}
+ FpPragmaStack.Act(Loc, Sema::PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
- FpPragmaStack.Act(Loc, Sema::PSK_Set, StringRef(),
- NewFPFeatures.getAsOpaqueInt());
}
void Sema::ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled) {
- unsigned NewValue = FpPragmaStack.hasValue()
- ? FpPragmaStack.CurrentValue
- : CurFPFeatureOverrides().getAsOpaqueInt();
- FPOptionsOverride NewFPFeatures(NewValue);
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
NewFPFeatures.setAllowFPReassociateOverride(IsEnabled);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
- FPOptionsOverride NewOverrides(NewValue);
- CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
void Sema::setRoundingMode(SourceLocation Loc, llvm::RoundingMode FPR) {
- unsigned NewValue = FpPragmaStack.hasValue()
- ? FpPragmaStack.CurrentValue
- : CurFPFeatureOverrides().getAsOpaqueInt();
- FPOptionsOverride NewFPFeatures(NewValue);
+ // C2x: 7.6.2p3 If the FE_DYNAMIC mode is specified and FENV_ACCESS is "off",
+ // the translator may assume that the default rounding mode is in effect.
+ if (FPR == llvm::RoundingMode::Dynamic &&
+ !CurFPFeatures.getAllowFEnvAccess() &&
+ CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Ignore)
+ FPR = llvm::RoundingMode::NearestTiesToEven;
+
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
NewFPFeatures.setRoundingModeOverride(FPR);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
- FPOptionsOverride NewOverrides(NewValue);
- CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
void Sema::setExceptionMode(SourceLocation Loc,
LangOptions::FPExceptionModeKind FPE) {
- unsigned NewValue = FpPragmaStack.hasValue()
- ? FpPragmaStack.CurrentValue
- : CurFPFeatureOverrides().getAsOpaqueInt();
- FPOptionsOverride NewFPFeatures(NewValue);
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
NewFPFeatures.setFPExceptionModeOverride(FPE);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
- FPOptionsOverride NewOverrides(NewValue);
- CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
- unsigned NewValue = FpPragmaStack.hasValue()
- ? FpPragmaStack.CurrentValue
- : CurFPFeatureOverrides().getAsOpaqueInt();
- FPOptionsOverride NewFPFeatures(NewValue);
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
+ auto LO = getLangOpts();
if (IsEnabled) {
// Verify Microsoft restriction:
// You can't enable fenv_access unless precise semantics are enabled.
@@ -1038,12 +1123,20 @@ void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
if (!isPreciseFPEnabled())
Diag(Loc, diag::err_pragma_fenv_requires_precise);
NewFPFeatures.setAllowFEnvAccessOverride(true);
- } else
+ // Enabling FENV access sets the rounding mode to Dynamic and the
+ // exception behavior to Strict.
+ NewFPFeatures.setRoundingModeOverride(llvm::RoundingMode::Dynamic);
+ NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
+ } else {
NewFPFeatures.setAllowFEnvAccessOverride(false);
- NewValue = NewFPFeatures.getAsOpaqueInt();
- FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
- FPOptionsOverride NewOverrides(NewValue);
- CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+ }
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
+ CurFPFeatures = NewFPFeatures.applyOverrides(LO);
+}
+
+void Sema::ActOnPragmaFPExceptions(SourceLocation Loc,
+ LangOptions::FPExceptionModeKind FPE) {
+ setExceptionMode(Loc, FPE);
}
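The user-visible effect is sketched below:

    // Sketch: FENV_ACCESS forces dynamic rounding and strict exceptions.
    #include <fenv.h>
    #pragma STDC FENV_ACCESS ON
    double scaled(double x) {
      fesetround(FE_UPWARD);  // legal: the FP environment is live here
      return x * 3.0;         // must honor the dynamic rounding mode
    }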
void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index 283a04683a32..ee91eb4c5deb 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -123,7 +123,8 @@ Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
return CFT_Device;
} else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
return CFT_Host;
- } else if (D->isImplicit() && !IgnoreImplicitHDAttr) {
+ } else if ((D->isImplicit() || !D->isUserProvided()) &&
+ !IgnoreImplicitHDAttr) {
// Some implicit declarations (like intrinsic functions) are not marked.
// Set the most lenient target on them for maximal flexibility.
return CFT_HostDevice;
@@ -519,7 +520,6 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
VD->hasAttr<CUDASharedAttr>()) {
if (LangOpts.GPUAllowDeviceInit)
return;
- assert(!VD->isStaticLocal() || VD->hasAttr<CUDASharedAttr>());
bool AllowedInit = false;
if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init))
AllowedInit =
@@ -639,58 +639,63 @@ void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
}
}
-Sema::DeviceDiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
- unsigned DiagID) {
+Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- DeviceDiagBuilder::Kind DiagKind = [this] {
+ SemaDiagnosticBuilder::Kind DiagKind = [&] {
+ if (!isa<FunctionDecl>(CurContext))
+ return SemaDiagnosticBuilder::K_Nop;
switch (CurrentCUDATarget()) {
case CFT_Global:
case CFT_Device:
- return DeviceDiagBuilder::K_Immediate;
+ return SemaDiagnosticBuilder::K_Immediate;
case CFT_HostDevice:
// An HD function counts as host code if we're compiling for host, and
// device code if we're compiling for device. Defer any errors in device
// mode until the function is known-emitted.
- if (getLangOpts().CUDAIsDevice) {
- return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
- FunctionEmissionStatus::Emitted)
- ? DeviceDiagBuilder::K_ImmediateWithCallStack
- : DeviceDiagBuilder::K_Deferred;
- }
- return DeviceDiagBuilder::K_Nop;
-
+ if (!getLangOpts().CUDAIsDevice)
+ return SemaDiagnosticBuilder::K_Nop;
+ if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
+ return SemaDiagnosticBuilder::K_Immediate;
+ return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
+ FunctionEmissionStatus::Emitted)
+ ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
+ : SemaDiagnosticBuilder::K_Deferred;
default:
- return DeviceDiagBuilder::K_Nop;
+ return SemaDiagnosticBuilder::K_Nop;
}
}();
- return DeviceDiagBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
}
-Sema::DeviceDiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
- unsigned DiagID) {
+Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- DeviceDiagBuilder::Kind DiagKind = [this] {
+ SemaDiagnosticBuilder::Kind DiagKind = [&] {
+ if (!isa<FunctionDecl>(CurContext))
+ return SemaDiagnosticBuilder::K_Nop;
switch (CurrentCUDATarget()) {
case CFT_Host:
- return DeviceDiagBuilder::K_Immediate;
+ return SemaDiagnosticBuilder::K_Immediate;
case CFT_HostDevice:
// An HD function counts as host code if we're compiling for host, and
// device code if we're compiling for device. Defer any errors in device
// mode until the function is known-emitted.
if (getLangOpts().CUDAIsDevice)
- return DeviceDiagBuilder::K_Nop;
-
+ return SemaDiagnosticBuilder::K_Nop;
+ if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
+ return SemaDiagnosticBuilder::K_Immediate;
return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
FunctionEmissionStatus::Emitted)
- ? DeviceDiagBuilder::K_ImmediateWithCallStack
- : DeviceDiagBuilder::K_Deferred;
+ ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
+ : SemaDiagnosticBuilder::K_Deferred;
default:
- return DeviceDiagBuilder::K_Nop;
+ return SemaDiagnosticBuilder::K_Nop;
}
}();
- return DeviceDiagBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
}
bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
@@ -711,24 +716,24 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// Otherwise, mark the call in our call graph so we can traverse it later.
bool CallerKnownEmitted =
getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted;
- DeviceDiagBuilder::Kind DiagKind = [this, Caller, Callee,
- CallerKnownEmitted] {
+ SemaDiagnosticBuilder::Kind DiagKind = [this, Caller, Callee,
+ CallerKnownEmitted] {
switch (IdentifyCUDAPreference(Caller, Callee)) {
case CFP_Never:
- return DeviceDiagBuilder::K_Immediate;
case CFP_WrongSide:
- assert(Caller && "WrongSide calls require a non-null caller");
+ assert(Caller && "Never/wrongSide calls require a non-null caller");
// If we know the caller will be emitted, we know this wrong-side call
// will be emitted, so it's an immediate error. Otherwise, defer the
// error until we know the caller is emitted.
- return CallerKnownEmitted ? DeviceDiagBuilder::K_ImmediateWithCallStack
- : DeviceDiagBuilder::K_Deferred;
+ return CallerKnownEmitted
+ ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
+ : SemaDiagnosticBuilder::K_Deferred;
default:
- return DeviceDiagBuilder::K_Nop;
+ return SemaDiagnosticBuilder::K_Nop;
}
}();
- if (DiagKind == DeviceDiagBuilder::K_Nop)
+ if (DiagKind == SemaDiagnosticBuilder::K_Nop)
return true;
// Avoid emitting this error twice for the same location. Using a hashtable
@@ -738,13 +743,15 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
return true;
- DeviceDiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
- << IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
- DeviceDiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
- Caller, *this)
- << Callee;
- return DiagKind != DeviceDiagBuilder::K_Immediate &&
- DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
+ SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
+ << IdentifyCUDATarget(Callee) << /*function*/ 0 << Callee
+ << IdentifyCUDATarget(Caller);
+ if (!Callee->getBuiltinID())
+ SemaDiagnosticBuilder(DiagKind, Callee->getLocation(),
+ diag::note_previous_decl, Caller, *this)
+ << Callee;
+ return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
+ DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
}
// Check the wrong-sided reference capture of lambda for CUDA/HIP.
@@ -781,14 +788,14 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
bool ShouldCheck = CalleeIsDevice && CallerIsHost;
if (!ShouldCheck || !Capture.isReferenceCapture())
return;
- auto DiagKind = DeviceDiagBuilder::K_Deferred;
+ auto DiagKind = SemaDiagnosticBuilder::K_Deferred;
if (Capture.isVariableCapture()) {
- DeviceDiagBuilder(DiagKind, Capture.getLocation(),
- diag::err_capture_bad_target, Callee, *this)
+ SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
+ diag::err_capture_bad_target, Callee, *this)
<< Capture.getVariable();
} else if (Capture.isThisCapture()) {
- DeviceDiagBuilder(DiagKind, Capture.getLocation(),
- diag::err_capture_bad_target_this_ptr, Callee, *this);
+ SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
+ diag::err_capture_bad_target_this_ptr, Callee, *this);
}
return;
}
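A sketch of the wrong-side call these builders defer; all names here are hypothetical:

    __host__ int hostOnly(int x);
    __host__ __device__ int wrapper(int x) {
      return hostOnly(x);  // wrong-side in device compilation: deferred until
    }                      // 'wrapper' is known to be emitted for the device
    __global__ void kernel(int *out) {
      *out = wrapper(1);   // forces device emission, so the error fires
    }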
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index 2efe26052c78..671820afd485 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -105,10 +105,10 @@ namespace {
// If this is an unbridged cast, wrap the result in an implicit
// cast that yields the unbridged-cast placeholder type.
if (IsARCUnbridgedCast) {
- castExpr = ImplicitCastExpr::Create(Self.Context,
- Self.Context.ARCUnbridgedCastTy,
- CK_Dependent, castExpr, nullptr,
- castExpr->getValueKind());
+ castExpr = ImplicitCastExpr::Create(
+ Self.Context, Self.Context.ARCUnbridgedCastTy, CK_Dependent,
+ castExpr, nullptr, castExpr->getValueKind(),
+ Self.CurFPFeatureOverrides());
}
updatePartOfExplicitCastFlags(castExpr);
return castExpr;
@@ -361,11 +361,10 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
DiscardMisalignedMemberAddress(DestType.getTypePtr(), E);
}
- return Op.complete(CXXStaticCastExpr::Create(Context, Op.ResultType,
- Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
- &Op.BasePath, DestTInfo,
- OpLoc, Parens.getEnd(),
- AngleBrackets));
+ return Op.complete(CXXStaticCastExpr::Create(
+ Context, Op.ResultType, Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ &Op.BasePath, DestTInfo, CurFPFeatureOverrides(), OpLoc,
+ Parens.getEnd(), AngleBrackets));
}
}
}
@@ -510,12 +509,10 @@ static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
if (RecFrom && RecTo) {
auto DeclFrom = RecFrom->getAsCXXRecordDecl();
if (!DeclFrom->isCompleteDefinition())
- S.Diag(DeclFrom->getLocation(), diag::note_type_incomplete)
- << DeclFrom->getDeclName();
+ S.Diag(DeclFrom->getLocation(), diag::note_type_incomplete) << DeclFrom;
auto DeclTo = RecTo->getAsCXXRecordDecl();
if (!DeclTo->isCompleteDefinition())
- S.Diag(DeclTo->getLocation(), diag::note_type_incomplete)
- << DeclTo->getDeclName();
+ S.Diag(DeclTo->getLocation(), diag::note_type_incomplete) << DeclTo;
}
}
}
@@ -892,6 +889,18 @@ void CastOperation::CheckDynamicCast() {
return;
}
+ // Warn when dynamic_cast is used with RTTI data disabled.
+ if (!Self.getLangOpts().RTTIData) {
+ bool MicrosoftABI =
+ Self.getASTContext().getTargetInfo().getCXXABI().isMicrosoft();
+ bool isClangCL = Self.getDiagnostics().getDiagnosticOptions().getFormat() ==
+ DiagnosticOptions::MSVC;
+ if (MicrosoftABI || !DestPointee->isVoidType())
+ Self.Diag(OpRange.getBegin(),
+ diag::warn_no_dynamic_cast_with_rtti_disabled)
+ << isClangCL;
+ }
+
// Done. Everything else is run-time checks.
Kind = CK_Dynamic;
}
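An illustration of the new warning, assuming compilation with -fno-rtti-data:

    struct Base { virtual ~Base(); };
    struct Derived : Base {};
    Derived *get(Base *b) {
      // warn_no_dynamic_cast_with_rtti_disabled: the cast will fail at
      // runtime because the RTTI data it relies on is not emitted.
      return dynamic_cast<Derived *>(b);
    }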
@@ -1245,7 +1254,13 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
return TC_Failed;
}
if (SrcType->isIntegralOrEnumerationType()) {
- Kind = CK_IntegralCast;
+ // [expr.static.cast]p10: If the enumeration type has a fixed underlying
+ // type, the value is first converted to that type by integral conversion.
+ const EnumType *Enum = DestType->getAs<EnumType>();
+ Kind = Enum->getDecl()->isFixed() &&
+ Enum->getDecl()->getIntegerType()->isBooleanType()
+ ? CK_IntegralToBoolean
+ : CK_IntegralCast;
return TC_Success;
} else if (SrcType->isRealFloatingType()) {
Kind = CK_FloatingToIntegral;
@@ -2204,6 +2219,12 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
bool destIsVector = DestType->isVectorType();
bool srcIsVector = SrcType->isVectorType();
if (srcIsVector || destIsVector) {
+ // Allow bitcasting between SVE VLATs and VLSTs, and vice-versa.
+ if (Self.isValidSveBitcast(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+
// The non-vector type, if any, must have integral type. This is
// the same rule that C vector casts use; note, however, that enum
// types are not integral in C++.
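A sketch of the casts this enables, assuming an SVE target built with -msve-vector-bits=512 (the 512 is an arbitrary choice):

    #include <arm_sve.h>
    typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(512)));
    svint32_t widen(fixed_i32 v) {
      return reinterpret_cast<svint32_t>(v);  // now a CK_BitCast, TC_Success
    }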
@@ -2659,6 +2680,8 @@ static void DiagnoseBadFunctionCast(Sema &Self, const ExprResult &SrcExpr,
return;
if (SrcType->isComplexIntegerType() && DestType->isComplexIntegerType())
return;
+ if (SrcType->isFixedPointType() && DestType->isFixedPointType())
+ return;
Self.Diag(SrcExpr.get()->getExprLoc(),
diag::warn_bad_function_cast)
@@ -2690,6 +2713,17 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // If the type is dependent, we won't do any other semantic analysis now.
+ if (Self.getASTContext().isDependenceAllowed() &&
+ (DestType->isDependentType() || SrcExpr.get()->isTypeDependent() ||
+ SrcExpr.get()->isValueDependent())) {
+ assert((DestType->containsErrors() || SrcExpr.get()->containsErrors() ||
+ SrcExpr.get()->containsErrors()) &&
+ "should only occur in error-recovery path.");
+ assert(Kind == CK_Dependent);
+ return;
+ }
+
// Overloads are allowed with C extensions, so we need to support them.
if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
DeclAccessPair DAP;
@@ -2724,6 +2758,13 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Allow bitcasting between compatible SVE vector types.
+ if ((SrcType->isVectorType() || DestType->isVectorType()) &&
+ Self.isValidSveBitcast(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return;
+ }
+
if (!DestType->isScalarType() && !DestType->isVectorType()) {
const RecordType *DestRecordTy = DestType->getAs<RecordType>();
@@ -3027,9 +3068,9 @@ ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
// -Wcast-qual
DiagnoseCastQual(Op.Self, Op.SrcExpr, Op.DestType);
- return Op.complete(CStyleCastExpr::Create(Context, Op.ResultType,
- Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
- &Op.BasePath, CastTypeInfo, LPLoc, RPLoc));
+ return Op.complete(CStyleCastExpr::Create(
+ Context, Op.ResultType, Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ &Op.BasePath, CurFPFeatureOverrides(), CastTypeInfo, LPLoc, RPLoc));
}
ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
@@ -3052,7 +3093,7 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
if (auto *ConstructExpr = dyn_cast<CXXConstructExpr>(SubExpr))
ConstructExpr->setParenOrBraceRange(SourceRange(LPLoc, RPLoc));
- return Op.complete(CXXFunctionalCastExpr::Create(Context, Op.ResultType,
- Op.ValueKind, CastTypeInfo, Op.Kind,
- Op.SrcExpr.get(), &Op.BasePath, LPLoc, RPLoc));
+ return Op.complete(CXXFunctionalCastExpr::Create(
+ Context, Op.ResultType, Op.ValueKind, CastTypeInfo, Op.Kind,
+ Op.SrcExpr.get(), &Op.BasePath, CurFPFeatureOverrides(), LPLoc, RPLoc));
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 509d88e25000..2d3d36f4adad 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -75,6 +75,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
@@ -1274,11 +1275,8 @@ static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
CallExpr *Call) {
- if (Call->getNumArgs() != 1) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
- << Call->getDirectCallee() << Call->getSourceRange();
+ if (checkArgCount(S, Call, 1))
return true;
- }
auto RT = Call->getArg(0)->getType();
if (!RT->isPointerType() || RT->getPointeeType()
@@ -1425,6 +1423,7 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case llvm::Triple::x86_64:
return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
@@ -1573,15 +1572,14 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinSetjmp(TheCall))
return ExprError();
break;
- case Builtin::BI_setjmp:
- case Builtin::BI_setjmpex:
- if (checkArgCount(*this, TheCall, 1))
- return true;
- break;
case Builtin::BI__builtin_classify_type:
if (checkArgCount(*this, TheCall, 1)) return true;
TheCall->setType(Context.IntTy);
break;
+ case Builtin::BI__builtin_complex:
+ if (SemaBuiltinComplex(TheCall))
+ return ExprError();
+ break;
case Builtin::BI__builtin_constant_p: {
if (checkArgCount(*this, TheCall, 1)) return true;
ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
@@ -1818,8 +1816,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
Eval.Diag = &Notes;
- if ((!ProbArg->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen,
- Context)) ||
+ if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
!Eval.Val.isFloat()) {
Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
<< ProbArg->getSourceRange();
@@ -1946,7 +1943,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// -Wframe-address warning if non-zero passed to builtin
// return/frame address.
Expr::EvalResult Result;
- if (TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
+ if (!TheCall->getArg(0)->isValueDependent() &&
+ TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
Result.Val.getInt() != 0)
Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
<< ((BuiltinID == Builtin::BI__builtin_return_address)
@@ -2284,10 +2282,7 @@ bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
return false;
- llvm::APSInt CoprocNoAP;
- bool IsICE = CoprocArg->isIntegerConstantExpr(CoprocNoAP, Context);
- (void)IsICE;
- assert(IsICE && "Coprocossor immediate is not a constant expression");
+ llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
int64_t CoprocNo = CoprocNoAP.getExtValue();
assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
@@ -2559,54 +2554,164 @@ bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
-bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
- BuiltinID == BPF::BI__builtin_btf_type_id) &&
- "unexpected ARM builtin");
+static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
+ if (Arg->getType()->getAsPlaceholderType())
+ return false;
- if (checkArgCount(*this, TheCall, 2))
+ // The first argument needs to be a record field access.
+ // If it is an array element access, we delay the decision
+ // to the BPF backend, which checks whether the access is a
+ // field access or not.
+ return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
+ dyn_cast<MemberExpr>(Arg->IgnoreParens()) ||
+ dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()));
+}
+
+static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
+ QualType VectorTy, QualType EltTy) {
+ QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
+ if (!Context.hasSameType(VectorEltTy, EltTy)) {
+ S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
+ << Call->getSourceRange() << VectorEltTy << EltTy;
+ return false;
+ }
+ return true;
+}
+
+static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
+ QualType ArgType = Arg->getType();
+ if (ArgType->getAsPlaceholderType())
+ return false;
+
+ // For the TYPE_EXISTENCE/TYPE_SIZEOF reloc types, the argument has one of
+ // the following forms:
+ // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
+ // 2. <type> var;
+ // __builtin_preserve_type_info(var, flag);
+ if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) &&
+ !dyn_cast<UnaryOperator>(Arg->IgnoreParens()))
+ return false;
+
+ // Typedef type.
+ if (ArgType->getAs<TypedefType>())
return true;
- Expr *Arg;
- if (BuiltinID == BPF::BI__builtin_btf_type_id) {
- // The second argument needs to be a constant int
- llvm::APSInt Value;
- Arg = TheCall->getArg(1);
- if (!Arg->isIntegerConstantExpr(Value, Context)) {
- Diag(Arg->getBeginLoc(), diag::err_btf_type_id_not_const)
- << 2 << Arg->getSourceRange();
+ // Record type or Enum type.
+ const Type *Ty = ArgType->getUnqualifiedDesugaredType();
+ if (const auto *RT = Ty->getAs<RecordType>()) {
+ if (!RT->getDecl()->getDeclName().isEmpty())
return true;
- }
+ } else if (const auto *ET = Ty->getAs<EnumType>()) {
+ if (!ET->getDecl()->getDeclName().isEmpty())
+ return true;
+ }
- TheCall->setType(Context.UnsignedIntTy);
+ return false;
+}
+
+static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
+ QualType ArgType = Arg->getType();
+ if (ArgType->getAsPlaceholderType())
+ return false;
+
+ // For the ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc types, the argument has the
+ // following form:
+ // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
+ // flag);
+ const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
+ if (!UO)
+ return false;
+
+ const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
+ if (!CE || CE->getCastKind() != CK_IntegralToPointer)
+ return false;
+
+ // The integer must be from an EnumConstantDecl.
+ const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
+ if (!DR)
+ return false;
+
+ const EnumConstantDecl *Enumerator =
+ dyn_cast<EnumConstantDecl>(DR->getDecl());
+ if (!Enumerator)
+ return false;
+
+ // The type must be EnumType.
+ const Type *Ty = ArgType->getUnqualifiedDesugaredType();
+ const auto *ET = Ty->getAs<EnumType>();
+ if (!ET)
return false;
+
+ // The enum value must be supported.
+ for (auto *EDI : ET->getDecl()->enumerators()) {
+ if (EDI == Enumerator)
+ return true;
}
- // The first argument needs to be a record field access.
- // If it is an array element access, we delay decision
- // to BPF backend to check whether the access is a
- // field access or not.
- Arg = TheCall->getArg(0);
- if (Arg->getType()->getAsPlaceholderType() ||
- (Arg->IgnoreParens()->getObjectKind() != OK_BitField &&
- !dyn_cast<MemberExpr>(Arg->IgnoreParens()) &&
- !dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()))) {
- Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_field)
- << 1 << Arg->getSourceRange();
+ return false;
+}
+
+bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
+ BuiltinID == BPF::BI__builtin_btf_type_id ||
+ BuiltinID == BPF::BI__builtin_preserve_type_info ||
+ BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
+ "unexpected BPF builtin");
+
+ if (checkArgCount(*this, TheCall, 2))
return true;
- }
// The second argument needs to be a constant int
- Arg = TheCall->getArg(1);
- llvm::APSInt Value;
- if (!Arg->isIntegerConstantExpr(Value, Context)) {
- Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_const)
- << 2 << Arg->getSourceRange();
+ Expr *Arg = TheCall->getArg(1);
+ Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
+ diag::kind kind;
+ if (!Value) {
+ if (BuiltinID == BPF::BI__builtin_preserve_field_info)
+ kind = diag::err_preserve_field_info_not_const;
+ else if (BuiltinID == BPF::BI__builtin_btf_type_id)
+ kind = diag::err_btf_type_id_not_const;
+ else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
+ kind = diag::err_preserve_type_info_not_const;
+ else
+ kind = diag::err_preserve_enum_value_not_const;
+ Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
return true;
}
- TheCall->setType(Context.UnsignedIntTy);
+ // Check the validity of the first argument.
+ Arg = TheCall->getArg(0);
+ bool InvalidArg = false;
+ bool ReturnUnsignedInt = true;
+ if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
+ if (!isValidBPFPreserveFieldInfoArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_field_info_not_field;
+ }
+ } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
+ if (!isValidBPFPreserveTypeInfoArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_type_info_invalid;
+ }
+ } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
+ if (!isValidBPFPreserveEnumValueArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_enum_value_invalid;
+ }
+ ReturnUnsignedInt = false;
+ } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
+ ReturnUnsignedInt = false;
+ }
+
+ if (InvalidArg) {
+ Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
+ return true;
+ }
+
+ if (ReturnUnsignedInt)
+ TheCall->setType(Context.UnsignedIntTy);
+ else
+ TheCall->setType(Context.UnsignedLongTy);
return false;
}
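Example argument shapes accepted by these checks (BPF target; the constant 0 passed as the second argument is a placeholder for the relocation-kind value):

    struct task { int pid; };
    enum color { RED = 1 };
    unsigned off(struct task *t) {
      return __builtin_preserve_field_info(t->pid, 0);  // field access
    }
    unsigned ti(struct task *t) {
      return __builtin_preserve_type_info(*t, 0);       // DeclRef/UnaryOperator
    }
    unsigned long ev(void) {
      return __builtin_preserve_enum_value(*(enum color *)RED, 0);
    }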
@@ -3067,6 +3172,62 @@ bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
+/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
+/// advancing the pointer over the consumed characters. The decoded type is
+/// returned. If the decoded type represents a constant integer with a
+/// constraint on its value then Mask is set to that value. The type descriptors
+/// used in Str are specific to PPC MMA builtins and are documented in the file
+/// defining the PPC builtins.
+static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
+ unsigned &Mask) {
+ bool RequireICE = false;
+ ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
+ switch (*Str++) {
+ case 'V':
+ return Context.getVectorType(Context.UnsignedCharTy, 16,
+ VectorType::VectorKind::AltiVecVector);
+ case 'i': {
+ char *End;
+ unsigned size = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing constant parameter constraint");
+ Str = End;
+ Mask = size;
+ return Context.IntTy;
+ }
+ case 'W': {
+ char *End;
+ unsigned size = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing PowerPC MMA type size");
+ Str = End;
+ QualType Type;
+ switch (size) {
+ #define PPC_VECTOR_TYPE(typeName, Id, size) \
+ case size: Type = Context.Id##Ty; break;
+ #include "clang/Basic/PPCTypes.def"
+ default: llvm_unreachable("Invalid PowerPC MMA vector type");
+ }
+ bool CheckVectorArgs = false;
+ while (!CheckVectorArgs) {
+ switch (*Str++) {
+ case '*':
+ Type = Context.getPointerType(Type);
+ break;
+ case 'C':
+ Type = Type.withConst();
+ break;
+ default:
+ CheckVectorArgs = true;
+ --Str;
+ break;
+ }
+ }
+ return Type;
+ }
+ default:
+ return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
+ }
+}
+
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall) {
unsigned i = 0, l = 0, u = 0;
@@ -3127,6 +3288,14 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return SemaVSXCheck(TheCall);
case PPC::BI__builtin_altivec_vgnb:
return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
+ case PPC::BI__builtin_altivec_vec_replace_elt:
+ case PPC::BI__builtin_altivec_vec_replace_unaligned: {
+ QualType VecTy = TheCall->getArg(0)->getType();
+ QualType EltTy = TheCall->getArg(1)->getType();
+ unsigned Width = Context.getIntWidth(EltTy);
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
+ !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
+ }
case PPC::BI__builtin_vsx_xxeval:
return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
case PPC::BI__builtin_altivec_vsldbi:
@@ -3135,10 +3304,31 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
case PPC::BI__builtin_vsx_xxpermx:
return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
+#define CUSTOM_BUILTIN(Name, Types, Acc) \
+ case PPC::BI__builtin_##Name: \
+ return SemaBuiltinPPCMMACall(TheCall, Types);
+#include "clang/Basic/BuiltinsPPC.def"
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
+// Check if the given type is a non-pointer PPC MMA type. This function is used
+// in Sema to prevent invalid uses of restricted PPC MMA types.
+bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
+ if (Type->isPointerType() || Type->isArrayType())
+ return false;
+
+ QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
+#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
+ if (false
+#include "clang/Basic/PPCTypes.def"
+ ) {
+ Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
+ return true;
+ }
+ return false;
+}
+
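A sketch of what the restriction rejects and allows:

    __vector_quad make(void);      // error: err_ppc_invalid_use_mma_type
    void use(__vector_quad *acc);  // OK: pointer (and array) types are allowed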
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
// position of memory order and scope arguments in the builtin
@@ -3186,8 +3376,7 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
ArgExpr = Arg.get();
Expr::EvalResult ArgResult1;
// Check that sync scope is a constant literal
- if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Expr::EvaluateForCodeGen,
- Context))
+ if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
<< ArgExpr->getType();
@@ -3198,11 +3387,10 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
Expr *Arg = TheCall->getArg(0);
- llvm::APSInt AbortCode(32);
- if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
- AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
- return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
- << Arg->getSourceRange();
+ if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
+ if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
+ return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
+ << Arg->getSourceRange();
}
// For intrinsics which take an immediate value as part of the instruction,
@@ -3611,7 +3799,7 @@ bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
enum { TileRegLow = 0, TileRegHigh = 7 };
bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
- ArrayRef<int> ArgNums) {
+ ArrayRef<int> ArgNums) {
for (int ArgNum : ArgNums) {
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
return true;
@@ -3619,19 +3807,20 @@ bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
return false;
}
-bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum) {
- return SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh);
-}
-
bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums) {
// Because the maximum number of tile registers is TileRegHigh + 1, we use
// one bit per register in the bitset to represent their usage.
std::bitset<TileRegHigh + 1> ArgValues;
for (int ArgNum : ArgNums) {
- llvm::APSInt Arg;
- SemaBuiltinConstantArg(TheCall, ArgNum, Arg);
- int ArgExtValue = Arg.getExtValue();
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ continue;
+
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+ int ArgExtValue = Result.getExtValue();
assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) &&
"Incorrect tile register num.");
if (ArgValues.test(ArgExtValue))
@@ -4382,6 +4571,8 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
if (!FnInfo)
return false;
+ CheckTCBEnforcement(TheCall, FDecl);
+
CheckAbsoluteValueFunction(TheCall, FDecl);
CheckMaxUnsignedZero(TheCall, FDecl);
@@ -4389,16 +4580,24 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
unsigned CMId = FDecl->getMemoryFunctionKind();
- if (CMId == 0)
- return false;
// Handle memory setting and copying functions.
- if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
+ switch (CMId) {
+ case 0:
+ return false;
+ case Builtin::BIstrlcpy: // fallthrough
+ case Builtin::BIstrlcat:
CheckStrlcpycatArguments(TheCall, FnInfo);
- else if (CMId == Builtin::BIstrncat)
+ break;
+ case Builtin::BIstrncat:
CheckStrncatArguments(TheCall, FnInfo);
- else
+ break;
+ case Builtin::BIfree:
+ CheckFreeArguments(TheCall);
+ break;
+ default:
CheckMemaccessArguments(TheCall, CMId, FnInfo);
+ }
return false;
}
@@ -4923,21 +5122,21 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
}
if (SubExprs.size() >= 2 && Form != Init) {
- llvm::APSInt Result(32);
- if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
- !isValidOrderingForOp(Result.getSExtValue(), Op))
- Diag(SubExprs[1]->getBeginLoc(),
- diag::warn_atomic_op_has_invalid_memory_order)
- << SubExprs[1]->getSourceRange();
+ if (Optional<llvm::APSInt> Result =
+ SubExprs[1]->getIntegerConstantExpr(Context))
+ if (!isValidOrderingForOp(Result->getSExtValue(), Op))
+ Diag(SubExprs[1]->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << SubExprs[1]->getSourceRange();
}
if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
auto *Scope = Args[Args.size() - 1];
- llvm::APSInt Result(32);
- if (Scope->isIntegerConstantExpr(Result, Context) &&
- !ScopeModel->isValid(Result.getZExtValue())) {
- Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
- << Scope->getSourceRange();
+ if (Optional<llvm::APSInt> Result =
+ Scope->getIntegerConstantExpr(Context)) {
+ if (!ScopeModel->isValid(Result->getZExtValue()))
+ Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
+ << Scope->getSourceRange();
}
SubExprs.push_back(Scope);
}
@@ -4956,6 +5155,11 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
? 0
: 1);
+ if (ValType->isExtIntType()) {
+ Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit);
+ return ExprError();
+ }
+
return AE;
}
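A sketch of the newly rejected case:

    _ExtInt(37) counter;
    void bump(void) {
      // error: err_atomic_builtin_ext_int_prohibit
      __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
    }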
@@ -5574,21 +5778,8 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
if (checkVAStartABI(*this, BuiltinID, Fn))
return true;
- if (TheCall->getNumArgs() > 2) {
- Diag(TheCall->getArg(2)->getBeginLoc(),
- diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << 2 << TheCall->getNumArgs()
- << Fn->getSourceRange()
- << SourceRange(TheCall->getArg(2)->getBeginLoc(),
- (*(TheCall->arg_end() - 1))->getEndLoc());
+ if (checkArgCount(*this, TheCall, 2))
return true;
- }
-
- if (TheCall->getNumArgs() < 2) {
- return Diag(TheCall->getEndLoc(),
- diag::err_typecheck_call_too_few_args_at_least)
- << 0 /*function call*/ << 2 << TheCall->getNumArgs();
- }
// Type-check the first argument normally.
if (checkBuiltinArgument(*this, TheCall, 0))
@@ -5698,15 +5889,8 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
- if (TheCall->getNumArgs() < 2)
- return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
- << 0 << 2 << TheCall->getNumArgs() /*function call*/;
- if (TheCall->getNumArgs() > 2)
- return Diag(TheCall->getArg(2)->getBeginLoc(),
- diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << 2 << TheCall->getNumArgs()
- << SourceRange(TheCall->getArg(2)->getBeginLoc(),
- (*(TheCall->arg_end() - 1))->getEndLoc());
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
ExprResult OrigArg0 = TheCall->getArg(0);
ExprResult OrigArg1 = TheCall->getArg(1);
@@ -5744,15 +5928,8 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
/// to check everything. We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
- if (TheCall->getNumArgs() < NumArgs)
- return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
- << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
- if (TheCall->getNumArgs() > NumArgs)
- return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
- diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
- << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
- (*(TheCall->arg_end() - 1))->getEndLoc());
+ if (checkArgCount(*this, TheCall, NumArgs))
+ return true;
// __builtin_fpclassify is the only case where NumArgs != 1, so we can count
// on all preceding parameters just being int. Try all of those.
@@ -5792,6 +5969,61 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
return false;
}
+/// Perform semantic analysis for a call to __builtin_complex.
+bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
+
+ bool Dependent = false;
+ for (unsigned I = 0; I != 2; ++I) {
+ Expr *Arg = TheCall->getArg(I);
+ QualType T = Arg->getType();
+ if (T->isDependentType()) {
+ Dependent = true;
+ continue;
+ }
+
+ // Despite supporting _Complex int, GCC requires a real floating point type
+ // for the operands of __builtin_complex.
+ if (!T->isRealFloatingType()) {
+ return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
+ << Arg->getType() << Arg->getSourceRange();
+ }
+
+ ExprResult Converted = DefaultLvalueConversion(Arg);
+ if (Converted.isInvalid())
+ return true;
+ TheCall->setArg(I, Converted.get());
+ }
+
+ if (Dependent) {
+ TheCall->setType(Context.DependentTy);
+ return false;
+ }
+
+ Expr *Real = TheCall->getArg(0);
+ Expr *Imag = TheCall->getArg(1);
+ if (!Context.hasSameType(Real->getType(), Imag->getType())) {
+ return Diag(Real->getBeginLoc(),
+ diag::err_typecheck_call_different_arg_types)
+ << Real->getType() << Imag->getType()
+ << Real->getSourceRange() << Imag->getSourceRange();
+ }
+
+ // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
+ // don't allow this builtin to form those types either.
+ // FIXME: Should we allow these types?
+ if (Real->getType()->isFloat16Type())
+ return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
+ << "_Float16";
+ if (Real->getType()->isHalfType())
+ return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
+ << "half";
+
+ TheCall->setType(Context.getComplexType(Real->getType()));
+ return false;
+}
+
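The builtin mirrors C's CMPLX macros; a usage sketch:

    // Builds a complex value from two reals of the same floating type,
    // preserving signed zeros and infinities (unlike 'a + b*I').
    double _Complex z  = __builtin_complex(0.0, -0.0);
    float  _Complex zf = __builtin_complex(1.0f, 2.0f);
    // __builtin_complex(1.0f, 2.0); // error: different argument types
    // __builtin_complex(1, 2);      // error: requires real floating operands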
// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
@@ -5801,21 +6033,11 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
unsigned ExpectedNumArgs = 3;
- if (TheCall->getNumArgs() < ExpectedNumArgs)
- return Diag(TheCall->getEndLoc(),
- diag::err_typecheck_call_too_few_args_at_least)
- << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
- << TheCall->getSourceRange();
-
- if (TheCall->getNumArgs() > ExpectedNumArgs)
- return Diag(TheCall->getEndLoc(),
- diag::err_typecheck_call_too_many_args_at_most)
- << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
- << TheCall->getSourceRange();
+ if (checkArgCount(*this, TheCall, ExpectedNumArgs))
+ return true;
// Check the third argument is a compile time constant
- llvm::APSInt Value;
- if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
+ if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
return Diag(TheCall->getBeginLoc(),
diag::err_vsx_builtin_nonconstant_argument)
<< 3 /* argument index */ << TheCall->getDirectCallee()
@@ -5910,17 +6132,18 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
TheCall->getArg(i)->isValueDependent())
continue;
- llvm::APSInt Result(32);
- if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
+ Optional<llvm::APSInt> Result;
+ if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_shufflevector_nonconstant_argument)
<< TheCall->getArg(i)->getSourceRange());
// Allow -1 which will be translated to undef in the IR.
- if (Result.isSigned() && Result.isAllOnesValue())
+ if (Result->isSigned() && Result->isAllOnesValue())
continue;
- if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
+ if (Result->getActiveBits() > 64 ||
+ Result->getZExtValue() >= numElements * 2)
return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_shufflevector_argument_too_large)
<< TheCall->getArg(i)->getSourceRange());
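A usage sketch of the constraints enforced here:

    typedef int v4si __attribute__((vector_size(16)));
    v4si mix(v4si a, v4si b) {
      // indices must be ICEs in [-1, 2*N); -1 lowers to undef
      return __builtin_shufflevector(a, b, 7, 6, -1, 0);
    }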
@@ -6167,10 +6390,11 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
- if (!Arg->isIntegerConstantExpr(Result, Context))
+ Optional<llvm::APSInt> R;
+ if (!(R = Arg->getIntegerConstantExpr(Context)))
return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
<< FDecl->getDeclName() << Arg->getSourceRange();
-
+ Result = *R;
return false;
}
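The same mechanical migration recurs throughout this patch: the out-parameter form isIntegerConstantExpr(Result, Context) becomes the Optional-returning getIntegerConstantExpr(Context). A minimal sketch of the new idiom ('consume' is a placeholder):

    if (Optional<llvm::APSInt> V = E->getIntegerConstantExpr(Ctx)) {
      consume(V->getZExtValue());
    } else {
      // not an integer constant expression: diagnose or bail out
    }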
@@ -6588,6 +6812,64 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
return false;
}
+/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
+/// Emit an error and return true on failure; return false on success.
+/// TypeStr is a string containing the type descriptor of the value returned by
+/// the builtin and the descriptors of the expected type of the arguments.
+bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) {
+
+ assert((TypeStr[0] != '\0') &&
+ "Invalid types in PPC MMA builtin declaration");
+
+ unsigned Mask = 0;
+ unsigned ArgNum = 0;
+
+ // The first type in TypeStr is the type of the value returned by the
+ // builtin. So we first read that type and change the type of TheCall.
+ QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ TheCall->setType(type);
+
+ while (*TypeStr != '\0') {
+ Mask = 0;
+ QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ if (ArgNum >= TheCall->getNumArgs()) {
+ ArgNum++;
+ break;
+ }
+
+ Expr *Arg = TheCall->getArg(ArgNum);
+ QualType ArgType = Arg->getType();
+
+ if ((ExpectedType->isVoidPointerType() && !ArgType->isPointerType()) ||
+ (!ExpectedType->isVoidPointerType() &&
+ ArgType.getCanonicalType() != ExpectedType))
+ return Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
+ << ArgType << ExpectedType << 1 << 0 << 0;
+
+    // If the value of the Mask is not 0, we have a constraint on the size of
+    // the integer argument, so here we ensure the argument is a constant that
+    // is in the valid range.
+ if (Mask != 0 &&
+ SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
+ return true;
+
+ ArgNum++;
+ }
+
+  // If we exited the previous loop early, there are remaining types to read
+  // from TypeStr. Read them all so that we know the expected number of
+  // arguments for TheCall and, if the counts do not match, can emit a better
+  // error message.
+ while (*TypeStr != '\0') {
+ (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ ArgNum++;
+ }
+ if (checkArgCount(*this, TheCall, ArgNum))
+ return true;
+
+ return false;
+}
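
The two-loop structure above deserves a note: the first loop validates arguments against the decoded descriptors, and the trailing loop only counts descriptors left unread, so that checkArgCount can report the true expected argument count. A simplified sketch of the counting idea; decodeNext is a hypothetical stand-in for DecodePPCMMATypeFromStr:

    // Sketch only: decodeNext(const char *&P) is assumed to consume one type
    // descriptor and advance P, as the real decoder does with TypeStr.
    unsigned countDescriptors(const char *P) {
      unsigned Expected = 0;
      while (*P != '\0') {
        decodeNext(P);  // advances P past one type descriptor
        ++Expected;     // one descriptor corresponds to one expected argument
      }
      return Expected;  // the full count, even if checking stopped early
    }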
+
/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
@@ -9966,6 +10248,68 @@ void Sema::CheckStrncatArguments(const CallExpr *CE,
<< FixItHint::CreateReplacement(SR, OS.str());
}
+namespace {
+void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
+ const UnaryOperator *UnaryExpr,
+ const VarDecl *Var) {
+ StorageClass Class = Var->getStorageClass();
+ if (Class == StorageClass::SC_Extern ||
+ Class == StorageClass::SC_PrivateExtern ||
+ Var->getType()->isReferenceType())
+ return;
+
+ S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
+ << CalleeName << Var;
+}
+
+void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
+ const UnaryOperator *UnaryExpr, const Decl *D) {
+ if (const auto *Field = dyn_cast<FieldDecl>(D))
+ S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
+ << CalleeName << Field;
+}
+
+void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
+ const UnaryOperator *UnaryExpr) {
+ if (UnaryExpr->getOpcode() != UnaryOperator::Opcode::UO_AddrOf)
+ return;
+
+ if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr()))
+ if (const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()))
+ return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, Var);
+
+ if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
+ return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
+ Lvalue->getMemberDecl());
+}
+
+void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
+ const DeclRefExpr *Lvalue) {
+ if (!Lvalue->getType()->isArrayType())
+ return;
+
+ const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
+ if (Var == nullptr)
+ return;
+
+ S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
+ << CalleeName << Var;
+}
+} // namespace
+
+/// Alerts the user that they are attempting to free a non-malloc'd object.
+void Sema::CheckFreeArguments(const CallExpr *E) {
+ const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
+ const std::string CalleeName =
+ dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
+
+ if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
+ return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
+
+ if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
+ return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
+}
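
The helpers above cover the three shapes of argument this warning recognizes: the address of a local variable, the address of a member, and a stack array. A small self-contained example that would trigger -Wfree-nonheap-object under these checks:

    #include <cstdlib>

    struct S { int member; };

    void demo(S s) {
      int local = 0;
      int arr[4];
      std::free(&local);    // address of a local VarDecl -> warning
      std::free(&s.member); // address of a FieldDecl via MemberExpr -> warning
      std::free(arr);       // DeclRefExpr of array type (stack array) -> warning
    }

Extern, __private_extern__, and reference-typed variables are deliberately skipped by the lvalue check above.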
+
void
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
@@ -9995,6 +10339,11 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
<< FD << getLangOpts().CPlusPlus11;
}
}
+
+  // PPC MMA non-pointer types are not allowed as return types. Checking the
+  // type here prevents the user from using a PPC MMA type as a trailing
+  // return type.
+ if (Context.getTargetInfo().getTriple().isPPC64())
+ CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}
//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
@@ -10048,15 +10397,23 @@ namespace {
/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
- /// The number of bits active in the int.
+ /// The number of bits active in the int. Note that this includes exactly one
+ /// sign bit if !NonNegative.
unsigned Width;
- /// True if the int is known not to have negative values.
+  /// True if the int is known not to have negative values. If so, all leading
+  /// bits before Width are known zero; otherwise they are known to be the
+  /// same as the MSB within Width.
bool NonNegative;
IntRange(unsigned Width, bool NonNegative)
: Width(Width), NonNegative(NonNegative) {}
+ /// Number of bits excluding the sign bit.
+ unsigned valueBits() const {
+ return NonNegative ? Width : Width - 1;
+ }
+
/// Returns the range of the bool type.
static IntRange forBoolType() {
return IntRange(1, true);
@@ -10140,14 +10497,63 @@ struct IntRange {
/// Returns the supremum of two ranges: i.e. their conservative merge.
static IntRange join(IntRange L, IntRange R) {
- return IntRange(std::max(L.Width, R.Width),
+ bool Unsigned = L.NonNegative && R.NonNegative;
+ return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
L.NonNegative && R.NonNegative);
}
- /// Returns the infinum of two ranges: i.e. their aggressive merge.
- static IntRange meet(IntRange L, IntRange R) {
- return IntRange(std::min(L.Width, R.Width),
- L.NonNegative || R.NonNegative);
+ /// Return the range of a bitwise-AND of the two ranges.
+ static IntRange bit_and(IntRange L, IntRange R) {
+ unsigned Bits = std::max(L.Width, R.Width);
+ bool NonNegative = false;
+ if (L.NonNegative) {
+ Bits = std::min(Bits, L.Width);
+ NonNegative = true;
+ }
+ if (R.NonNegative) {
+ Bits = std::min(Bits, R.Width);
+ NonNegative = true;
+ }
+ return IntRange(Bits, NonNegative);
+ }
+
+ /// Return the range of a sum of the two ranges.
+ static IntRange sum(IntRange L, IntRange R) {
+ bool Unsigned = L.NonNegative && R.NonNegative;
+ return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
+ Unsigned);
+ }
+
+ /// Return the range of a difference of the two ranges.
+ static IntRange difference(IntRange L, IntRange R) {
+ // We need a 1-bit-wider range if:
+ // 1) LHS can be negative: least value can be reduced.
+ // 2) RHS can be negative: greatest value can be increased.
+ bool CanWiden = !L.NonNegative || !R.NonNegative;
+ bool Unsigned = L.NonNegative && R.Width == 0;
+ return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
+ !Unsigned,
+ Unsigned);
+ }
+
+ /// Return the range of a product of the two ranges.
+ static IntRange product(IntRange L, IntRange R) {
+ // If both LHS and RHS can be negative, we can form
+ // -2^L * -2^R = 2^(L + R)
+ // which requires L + R + 1 value bits to represent.
+ bool CanWiden = !L.NonNegative && !R.NonNegative;
+ bool Unsigned = L.NonNegative && R.NonNegative;
+ return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
+ Unsigned);
+ }
+
+ /// Return the range of a remainder operation between the two ranges.
+ static IntRange rem(IntRange L, IntRange R) {
+ // The result of a remainder can't be larger than the result of
+ // either side. The sign of the result is the sign of the LHS.
+ bool Unsigned = L.NonNegative;
+ return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
+ Unsigned);
}
};
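
To make the new combiners concrete, here are a few ranges worked through the formulas above. IntRange is local to this file, so this illustrates the arithmetic rather than a public API:

    IntRange U8(8, /*NonNegative=*/true);   // values 0..255
    IntRange S8(8, /*NonNegative=*/false);  // values -128..127 (7 value bits)

    IntRange::sum(U8, U8);      // Width 9, non-negative: 0..510 fits
    IntRange::product(S8, S8);  // Width 16, signed: (-128)*(-128) = 16384 fits
    IntRange::bit_and(IntRange(32, false), U8); // Width 8, non-negative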
@@ -10205,9 +10611,13 @@ static QualType GetExprType(const Expr *E) {
/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
-/// \param MaxWidth - the width to which the value will be truncated
+/// \param MaxWidth The width to which the value will be truncated.
+/// \param Approximate If \c true, return a likely range for the result: in
+///    particular, assume that arithmetic on narrower types doesn't leave
+/// those types. If \c false, return a range including all possible
+/// result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
- bool InConstantContext) {
+ bool InConstantContext, bool Approximate) {
E = E->IgnoreParens();
// Try a full evaluation first.
@@ -10220,7 +10630,8 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
// being of the new, wider type.
if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
- return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext);
+ return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
+ Approximate);
IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
@@ -10233,7 +10644,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
std::min(MaxWidth, OutputTypeRange.Width),
- InConstantContext);
+ InConstantContext, Approximate);
// Bail out if the subexpr's range is as wide as the cast type.
if (SubRange.Width >= OutputTypeRange.Width)
@@ -10251,17 +10662,25 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
return GetExprRange(C,
CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
- MaxWidth, InConstantContext);
+ MaxWidth, InConstantContext, Approximate);
// Otherwise, conservatively merge.
- IntRange L =
- GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext);
- IntRange R =
- GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext);
+ // GetExprRange requires an integer expression, but a throw expression
+ // results in a void type.
+ Expr *E = CO->getTrueExpr();
+ IntRange L = E->getType()->isVoidType()
+ ? IntRange{0, true}
+ : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
+ E = CO->getFalseExpr();
+ IntRange R = E->getType()->isVoidType()
+ ? IntRange{0, true}
+ : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
return IntRange::join(L, R);
}
if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
+ IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
+
switch (BO->getOpcode()) {
case BO_Cmp:
llvm_unreachable("builtin <=> should have class type");
@@ -10293,7 +10712,8 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
// been coerced to the LHS type.
case BO_Assign:
// TODO: bitfields?
- return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
+ return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
+ Approximate);
// Operations with opaque sources are black-listed.
case BO_PtrMemD:
@@ -10303,9 +10723,8 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
    // Bitwise-and uses the *infimum* of the two source ranges.
case BO_And:
case BO_AndAssign:
- return IntRange::meet(
- GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext),
- GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext));
+ Combine = IntRange::bit_and;
+ break;
// Left shift gets black-listed based on a judgement call.
case BO_Shl:
@@ -10326,18 +10745,20 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
// Right shift by a constant can narrow its left argument.
case BO_Shr:
case BO_ShrAssign: {
- IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext);
+ IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
+ Approximate);
// If the shift amount is a positive constant, drop the width by
// that much.
- llvm::APSInt shift;
- if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
- shift.isNonNegative()) {
- unsigned zext = shift.getZExtValue();
- if (zext >= L.Width)
- L.Width = (L.NonNegative ? 0 : 1);
- else
- L.Width -= zext;
+ if (Optional<llvm::APSInt> shift =
+ BO->getRHS()->getIntegerConstantExpr(C)) {
+ if (shift->isNonNegative()) {
+ unsigned zext = shift->getZExtValue();
+ if (zext >= L.Width)
+ L.Width = (L.NonNegative ? 0 : 1);
+ else
+ L.Width -= zext;
+ }
}
return L;
@@ -10345,12 +10766,24 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
// Comma acts as its right operand.
case BO_Comma:
- return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
+ return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
+ Approximate);
+
+ case BO_Add:
+ if (!Approximate)
+ Combine = IntRange::sum;
+ break;
- // Black-list pointer subtractions.
case BO_Sub:
if (BO->getLHS()->getType()->isPointerType())
return IntRange::forValueOfType(C, GetExprType(E));
+ if (!Approximate)
+ Combine = IntRange::difference;
+ break;
+
+ case BO_Mul:
+ if (!Approximate)
+ Combine = IntRange::product;
break;
// The width of a division result is mostly determined by the size
@@ -10358,12 +10791,13 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
case BO_Div: {
// Don't 'pre-truncate' the operands.
unsigned opWidth = C.getIntWidth(GetExprType(E));
- IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext);
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
+ Approximate);
// If the divisor is constant, use that.
- llvm::APSInt divisor;
- if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
- unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
+ if (Optional<llvm::APSInt> divisor =
+ BO->getRHS()->getIntegerConstantExpr(C)) {
+ unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
if (log2 >= L.Width)
L.Width = (L.NonNegative ? 0 : 1);
else
@@ -10372,36 +10806,35 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
}
// Otherwise, just use the LHS's width.
- IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext);
+ // FIXME: This is wrong if the LHS could be its minimal value and the RHS
+ // could be -1.
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
+ Approximate);
return IntRange(L.Width, L.NonNegative && R.NonNegative);
}
- // The result of a remainder can't be larger than the result of
- // either side.
- case BO_Rem: {
- // Don't 'pre-truncate' the operands.
- unsigned opWidth = C.getIntWidth(GetExprType(E));
- IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext);
- IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext);
-
- IntRange meet = IntRange::meet(L, R);
- meet.Width = std::min(meet.Width, MaxWidth);
- return meet;
- }
+ case BO_Rem:
+ Combine = IntRange::rem;
+ break;
// The default behavior is okay for these.
- case BO_Mul:
- case BO_Add:
case BO_Xor:
case BO_Or:
break;
}
- // The default case is to treat the operation as if it were closed
- // on the narrowest type that encompasses both operands.
- IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext);
- IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
- return IntRange::join(L, R);
+ // Combine the two ranges, but limit the result to the type in which we
+ // performed the computation.
+ QualType T = GetExprType(E);
+ unsigned opWidth = C.getIntWidth(T);
+ IntRange L =
+ GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
+ IntRange R =
+ GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
+ IntRange C = Combine(L, R);
+ C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
+ C.Width = std::min(C.Width, MaxWidth);
+ return C;
}
if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
@@ -10416,12 +10849,14 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
return IntRange::forValueOfType(C, GetExprType(E));
default:
- return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext);
+ return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
+ Approximate);
}
}
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
- return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext);
+ return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
+ Approximate);
if (const auto *BitField = E->getSourceBitField())
return IntRange(BitField->getBitWidthValue(C),
@@ -10431,8 +10866,9 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
}
static IntRange GetExprRange(ASTContext &C, const Expr *E,
- bool InConstantContext) {
- return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext);
+ bool InConstantContext, bool Approximate) {
+ return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
+ Approximate);
}
/// Checks whether the given value, which currently has the given
@@ -10677,15 +11113,16 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
return false;
- // TODO: Investigate using GetExprRange() to get tighter bounds
- // on the bit ranges.
+ IntRange OtherValueRange = GetExprRange(
+ S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
+
QualType OtherT = Other->getType();
if (const auto *AT = OtherT->getAs<AtomicType>())
OtherT = AT->getValueType();
- IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);
+ IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
  // Special case for ObjC BOOL on targets where it's a typedef for a signed char
- // (Namely, macOS).
+ // (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
S.NSAPIObj->isObjCBOOLType(OtherT) &&
OtherT->isSpecificBuiltinType(BuiltinType::SChar);
@@ -10695,17 +11132,37 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
bool OtherIsBooleanDespiteType =
!OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
- OtherRange = IntRange::forBoolType();
+ OtherTypeRange = OtherValueRange = IntRange::forBoolType();
- // Determine the promoted range of the other type and see if a comparison of
- // the constant against that range is tautological.
- PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
- Value.isUnsigned());
- auto Cmp = OtherPromotedRange.compare(Value);
+ // Check if all values in the range of possible values of this expression
+ // lead to the same comparison outcome.
+ PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
+ Value.isUnsigned());
+ auto Cmp = OtherPromotedValueRange.compare(Value);
auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
if (!Result)
return false;
+ // Also consider the range determined by the type alone. This allows us to
+ // classify the warning under the proper diagnostic group.
+ bool TautologicalTypeCompare = false;
+ {
+ PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
+ Value.isUnsigned());
+ auto TypeCmp = OtherPromotedTypeRange.compare(Value);
+ if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
+ RhsConstant)) {
+ TautologicalTypeCompare = true;
+ Cmp = TypeCmp;
+ Result = TypeResult;
+ }
+ }
+
+ // Don't warn if the non-constant operand actually always evaluates to the
+ // same value.
+ if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
+ return false;
+
// Suppress the diagnostic for an in-range comparison if the constant comes
// from a macro or enumerator. We don't want to diagnose
//
@@ -10716,6 +11173,12 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
if (InRange && IsEnumConstOrFromMacro(S, Constant))
return false;
+ // A comparison of an unsigned bit-field against 0 is really a type problem,
+ // even though at the type level the bit-field might promote to 'signed int'.
+ if (Other->refersToBitField() && InRange && Value == 0 &&
+ Other->getType()->isUnsignedIntegerOrEnumerationType())
+ TautologicalTypeCompare = true;
+
// If this is a comparison to an enum constant, include that
// constant in the diagnostic.
const EnumConstantDecl *ED = nullptr;
@@ -10734,6 +11197,14 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
OS << Value;
}
+ if (!TautologicalTypeCompare) {
+ S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
+ << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
+ << E->getOpcodeStr() << OS.str() << *Result
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ return true;
+ }
+
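
With the value range available, the check now fires on comparisons that are tautological because of the expression's computed range even when the types alone are inconclusive. For example:

    int f(int x) {
      // 'x & 0xff' has value range [0, 255] (bit_and of an 8-bit non-negative
      // range), so the comparison below is always false, even though a plain
      // 'int > 300' comparison is not tautological at the type level.
      if ((x & 0xff) > 300)
        return 1;
      return 0;
    }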
if (IsObjCSignedCharBool) {
S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
S.PDiag(diag::warn_tautological_compare_objc_bool)
@@ -10795,23 +11266,20 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
Expr *RHS = E->getRHS();
if (T->isIntegralType(S.Context)) {
- llvm::APSInt RHSValue;
- llvm::APSInt LHSValue;
-
- bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context);
- bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context);
+ Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context);
+ Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context);
// We don't care about expressions whose result is a constant.
- if (IsRHSIntegralLiteral && IsLHSIntegralLiteral)
+ if (RHSValue && LHSValue)
return AnalyzeImpConvsInComparison(S, E);
// We only care about expressions where just one side is literal
- if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) {
+ if ((bool)RHSValue ^ (bool)LHSValue) {
// Is the constant on the RHS or LHS?
- const bool RhsConstant = IsRHSIntegralLiteral;
+ const bool RhsConstant = (bool)RHSValue;
Expr *Const = RhsConstant ? RHS : LHS;
Expr *Other = RhsConstant ? LHS : RHS;
- const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue;
+ const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;
// Check whether an integer constant comparison results in a value
// of 'true' or 'false'.
@@ -10856,8 +11324,8 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
}
// Otherwise, calculate the effective range of the signed operand.
- IntRange signedRange =
- GetExprRange(S.Context, signedOperand, S.isConstantEvaluated());
+ IntRange signedRange = GetExprRange(
+ S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true);
// Go ahead and analyze implicit conversions in the operands. Note
// that we skip the implicit conversions on both sides.
@@ -10875,7 +11343,8 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
if (E->isEqualityOp()) {
unsigned comparisonWidth = S.Context.getIntWidth(T);
IntRange unsignedRange =
- GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated());
+ GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
+ /*Approximate*/ true);
// We should never be unable to prove that the unsigned operand is
// non-negative.
@@ -10915,7 +11384,7 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
BitfieldEnumDecl->getNumPositiveBits() > 0 &&
BitfieldEnumDecl->getNumNegativeBits() == 0) {
S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
- << BitfieldEnumDecl->getNameAsString();
+ << BitfieldEnumDecl;
}
}
@@ -11695,9 +12164,9 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
Expr::EvalResult Result;
if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
S.isConstantEvaluated())) {
- APFixedPoint Value = Result.Val.getFixedPoint();
- APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
- APFixedPoint MinVal = S.Context.getFixedPointMin(T);
+ llvm::APFixedPoint Value = Result.Val.getFixedPoint();
+ llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
+ llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
if (Value > MaxVal || Value < MinVal) {
S.DiagRuntimeBehavior(E->getExprLoc(), E,
S.PDiag(diag::warn_impcast_fixed_point_range)
@@ -11712,7 +12181,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (!S.isConstantEvaluated() &&
E->EvaluateAsFixedPoint(Result, S.Context,
Expr::SE_AllowSideEffects)) {
- APFixedPoint FXResult = Result.Val.getFixedPoint();
+ llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
bool Overflowed;
llvm::APSInt IntResult = FXResult.convertToInt(
@@ -11737,7 +12206,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
llvm::APSInt Value = Result.Val.getInt();
bool Overflowed;
- APFixedPoint IntResult = APFixedPoint::getFromIntValue(
+ llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
Value, S.Context.getFixedPointSemantics(T), &Overflowed);
if (Overflowed) {
@@ -11758,7 +12227,8 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
TargetBT->isFloatingType() && !IsListInit) {
// Determine the number of precision bits in the source integer type.
- IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated());
+ IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(),
+ /*Approximate*/ true);
unsigned int SourcePrecision = SourceRange.Width;
// Determine the number of precision bits in the
@@ -11769,8 +12239,8 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (SourcePrecision > 0 && TargetPrecision > 0 &&
SourcePrecision > TargetPrecision) {
- llvm::APSInt SourceInt;
- if (E->isIntegerConstantExpr(SourceInt, S.Context)) {
+ if (Optional<llvm::APSInt> SourceInt =
+ E->getIntegerConstantExpr(S.Context)) {
// If the source integer is a constant, convert it to the target
// floating point type. Issue a warning if the value changes
// during the whole conversion.
@@ -11778,11 +12248,11 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
llvm::APFloat::opStatus ConversionStatus =
TargetFloatValue.convertFromAPInt(
- SourceInt, SourceBT->isSignedInteger(),
+ *SourceInt, SourceBT->isSignedInteger(),
llvm::APFloat::rmNearestTiesToEven);
if (ConversionStatus != llvm::APFloat::opOK) {
- std::string PrettySourceValue = SourceInt.toString(10);
+ std::string PrettySourceValue = SourceInt->toString(10);
SmallString<32> PrettyTargetValue;
TargetFloatValue.toString(PrettyTargetValue, TargetPrecision);
@@ -11823,10 +12293,13 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
<< E->getType());
}
- IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated());
+ IntRange SourceTypeRange =
+ IntRange::forTargetOfCanonicalType(S.Context, Source);
+ IntRange LikelySourceRange =
+ GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true);
IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
- if (SourceRange.Width > TargetRange.Width) {
+ if (LikelySourceRange.Width > TargetRange.Width) {
// If the source is a constant, use a default-on diagnostic.
// TODO: this should happen for bitfield stores, too.
Expr::EvalResult Result;
@@ -11845,7 +12318,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
E->getExprLoc(), E,
S.PDiag(diag::warn_impcast_integer_precision_constant)
<< PrettySourceValue << PrettyTargetValue << E->getType() << T
- << E->getSourceRange() << clang::SourceRange(CC));
+ << E->getSourceRange() << SourceRange(CC));
return;
}
@@ -11859,7 +12332,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
}
- if (TargetRange.Width > SourceRange.Width) {
+ if (TargetRange.Width > SourceTypeRange.Width) {
if (auto *UO = dyn_cast<UnaryOperator>(E))
if (UO->getOpcode() == UO_Minus)
if (Source->isUnsignedIntegerType()) {
@@ -11872,8 +12345,9 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
}
- if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
- SourceRange.NonNegative && Source->isSignedIntegerType()) {
+ if (TargetRange.Width == LikelySourceRange.Width &&
+ !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
+ Source->isSignedIntegerType()) {
// Warn when doing a signed to signed conversion, warn if the positive
// source value is exactly the width of the target type, which will
// cause a negative value to be stored.
@@ -11890,7 +12364,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
E->getExprLoc(), E,
S.PDiag(diag::warn_impcast_integer_precision_constant)
<< PrettySourceValue << PrettyTargetValue << E->getType() << T
- << E->getSourceRange() << clang::SourceRange(CC));
+ << E->getSourceRange() << SourceRange(CC));
return;
}
}
@@ -11898,9 +12372,9 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Fall through for non-constants to give a sign conversion warning.
}
- if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
- (!TargetRange.NonNegative && SourceRange.NonNegative &&
- SourceRange.Width == TargetRange.Width)) {
+ if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
+ (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
+ LikelySourceRange.Width == TargetRange.Width)) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -13505,10 +13979,9 @@ getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
if (!P)
return llvm::None;
- llvm::APSInt IdxRes;
CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
- if (IntE->isIntegerConstantExpr(IdxRes, Ctx)) {
- CharUnits Offset = EltSize * IdxRes.getExtValue();
+ if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
+ CharUnits Offset = EltSize * IdxRes->getExtValue();
if (IsSub)
Offset = -Offset;
return std::make_pair(P->first, P->second + Offset);
@@ -13911,8 +14384,7 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
if (ND)
DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
- PDiag(diag::note_array_declared_here)
- << ND->getDeclName());
+ PDiag(diag::note_array_declared_here) << ND);
}
void Sema::CheckArrayAccess(const Expr *expr) {
@@ -14133,9 +14605,10 @@ namespace {
return;
if (Expr *RHS = BinOp->getRHS()) {
RHS = RHS->IgnoreParenCasts();
- llvm::APSInt Value;
+ Optional<llvm::APSInt> Value;
VarWillBeReased =
- (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0);
+ (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
+ *Value == 0);
}
}
}
@@ -15362,14 +15835,15 @@ ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
// Get and verify the matrix dimensions.
static llvm::Optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
- llvm::APSInt Value(64);
SourceLocation ErrorPos;
- if (!Expr->isIntegerConstantExpr(Value, S.Context, &ErrorPos)) {
+ Optional<llvm::APSInt> Value =
+ Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
+ if (!Value) {
S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
<< Name;
return {};
}
- uint64_t Dim = Value.getZExtValue();
+ uint64_t Dim = Value->getZExtValue();
if (!ConstantMatrixType::isDimensionValid(Dim)) {
S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
<< Name << ConstantMatrixType::getMaxElementsPerDimension();
@@ -15473,13 +15947,15 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
StrideExpr = StrideConv.get();
TheCall->setArg(3, StrideExpr);
- llvm::APSInt Value(64);
- if (MaybeRows && StrideExpr->isIntegerConstantExpr(Value, Context)) {
- uint64_t Stride = Value.getZExtValue();
- if (Stride < *MaybeRows) {
- Diag(StrideExpr->getBeginLoc(),
- diag::err_builtin_matrix_stride_too_small);
- ArgError = true;
+ if (MaybeRows) {
+ if (Optional<llvm::APSInt> Value =
+ StrideExpr->getIntegerConstantExpr(Context)) {
+ uint64_t Stride = Value->getZExtValue();
+ if (Stride < *MaybeRows) {
+ Diag(StrideExpr->getBeginLoc(),
+ diag::err_builtin_matrix_stride_too_small);
+ ArgError = true;
+ }
}
}
@@ -15570,13 +16046,15 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
}
// Check stride argument.
- llvm::APSInt Value(64);
- if (MatrixTy && StrideExpr->isIntegerConstantExpr(Value, Context)) {
- uint64_t Stride = Value.getZExtValue();
- if (Stride < MatrixTy->getNumRows()) {
- Diag(StrideExpr->getBeginLoc(),
- diag::err_builtin_matrix_stride_too_small);
- ArgError = true;
+ if (MatrixTy) {
+ if (Optional<llvm::APSInt> Value =
+ StrideExpr->getIntegerConstantExpr(Context)) {
+ uint64_t Stride = Value->getZExtValue();
+ if (Stride < MatrixTy->getNumRows()) {
+ Diag(StrideExpr->getBeginLoc(),
+ diag::err_builtin_matrix_stride_too_small);
+ ArgError = true;
+ }
}
}
@@ -15585,3 +16063,38 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
return CallResult;
}
+
+/// \brief Enforce the bounds of a TCB.
+/// CheckTCBEnforcement - Enforces that every function in a named TCB only
+/// directly calls other functions in the same TCB, as marked by the
+/// enforce_tcb and enforce_tcb_leaf attributes.
+void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
+ const FunctionDecl *Callee) {
+ const FunctionDecl *Caller = getCurFunctionDecl();
+
+ // Calls to builtins are not enforced.
+ if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
+ Callee->getBuiltinID() != 0)
+ return;
+
+ // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
+ // all TCBs the callee is a part of.
+ llvm::StringSet<> CalleeTCBs;
+ for_each(Callee->specific_attrs<EnforceTCBAttr>(),
+ [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
+ for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
+ [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
+
+ // Go through the TCBs the caller is a part of and emit warnings if Caller
+ // is in a TCB that the Callee is not.
+ for_each(
+ Caller->specific_attrs<EnforceTCBAttr>(),
+ [&](const auto *A) {
+ StringRef CallerTCB = A->getTCBName();
+ if (CalleeTCBs.count(CallerTCB) == 0) {
+ this->Diag(TheCall->getExprLoc(),
+ diag::warn_tcb_enforcement_violation) << Callee
+ << CallerTCB;
+ }
+ });
+}
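
A small usage example for the attributes this check enforces, assuming the GNU spelling; the TCB name is an arbitrary string:

    void in_tcb(void) __attribute__((enforce_tcb("net")));
    void outside_tcb(void);

    __attribute__((enforce_tcb("net")))
    void handler(void) {
      in_tcb();      // OK: the callee is in the caller's "net" TCB
      outside_tcb(); // warning: TCB enforcement violation for "net"
    }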
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index 0a8a27068ebf..c2785fd60fc2 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -2294,6 +2294,29 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
+
+ if (SemaRef.getLangOpts().CPlusPlus11 || SemaRef.getLangOpts().ObjC) {
+ // for ( range_declaration (:|in) range_expression ) { statements }
+ Builder.AddTypedTextChunk("for");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("range-declaration");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ if (SemaRef.getLangOpts().ObjC)
+ Builder.AddTextChunk("in");
+ else
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("range-expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
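
For reference, the snippet added above renders in the completion list as:

    for (range-declaration : range-expression) { statements }   // C++11
    for (range-declaration in range-expression) { statements }  // Objective-C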
}
if (S->getContinueParent()) {
@@ -2700,6 +2723,10 @@ static std::string formatObjCParamQualifiers(unsigned ObjCQuals,
case NullabilityKind::Unspecified:
Result += "null_unspecified ";
break;
+
+ case NullabilityKind::NullableResult:
+ llvm_unreachable("Not supported as a context-sensitive keyword!");
+ break;
}
}
}
@@ -3502,9 +3529,11 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
Result.AddTypedTextChunk("");
}
unsigned Idx = 0;
+ // The extra Idx < Sel.getNumArgs() check is needed due to legacy C-style
+ // method parameters.
for (ObjCMethodDecl::param_const_iterator P = Method->param_begin(),
PEnd = Method->param_end();
- P != PEnd; (void)++P, ++Idx) {
+ P != PEnd && Idx < Sel.getNumArgs(); (void)++P, ++Idx) {
if (Idx > 0) {
std::string Keyword;
if (Idx > StartParameter)
@@ -4256,7 +4285,7 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
DS.getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier &&
DS.getTypeSpecType() == DeclSpec::TST_typename &&
DS.getTypeSpecComplex() == DeclSpec::TSC_unspecified &&
- DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
!DS.isTypeAltiVecVector() && S &&
(S->getFlags() & Scope::DeclScope) != 0 &&
(S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
@@ -5395,8 +5424,8 @@ void Sema::CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
AddTypeQualifierResults(DS, Results, LangOpts);
if (LangOpts.CPlusPlus11) {
Results.AddResult("noexcept");
- if (D.getContext() == DeclaratorContext::MemberContext &&
- !D.isCtorOrDtor() && !D.isStaticMember()) {
+ if (D.getContext() == DeclaratorContext::Member && !D.isCtorOrDtor() &&
+ !D.isStaticMember()) {
if (!VS || !VS->isFinalSpecified())
Results.AddResult("final");
if (!VS || !VS->isOverrideSpecified())
@@ -5500,7 +5529,7 @@ typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate;
static void mergeCandidatesWithResults(
Sema &SemaRef, SmallVectorImpl<ResultCandidate> &Results,
- OverloadCandidateSet &CandidateSet, SourceLocation Loc) {
+ OverloadCandidateSet &CandidateSet, SourceLocation Loc, size_t ArgSize) {
// Sort the overload candidate set by placing the best overloads first.
llvm::stable_sort(CandidateSet, [&](const OverloadCandidate &X,
const OverloadCandidate &Y) {
@@ -5510,8 +5539,19 @@ static void mergeCandidatesWithResults(
// Add the remaining viable overload candidates as code-completion results.
for (OverloadCandidate &Candidate : CandidateSet) {
- if (Candidate.Function && Candidate.Function->isDeleted())
- continue;
+ if (Candidate.Function) {
+ if (Candidate.Function->isDeleted())
+ continue;
+ if (!Candidate.Function->isVariadic() &&
+ Candidate.Function->getNumParams() <= ArgSize &&
+          // Normally we don't surface a function with 2 params if you already
+          // have 2 args, because you would be inserting the 3rd now. But with
+          // zero args, keeping the overload helps the user figure out that no
+          // overload takes any arguments.
+ ArgSize > 0)
+ continue;
+ }
if (Candidate.Viable)
Results.push_back(ResultCandidate(Candidate.Function));
}
@@ -5562,22 +5602,25 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
// FIXME: Provide support for variadic template functions.
// Ignore type-dependent call expressions entirely.
- if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args) ||
- Expr::hasAnyTypeDependentArguments(Args)) {
+ if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args))
return QualType();
- }
+  // In the presence of dependent args we surface all possible signatures using
+  // the non-dependent args in the prefix. Afterwards we post-filter to make
+  // sure the provided candidates satisfy the parameter-count restrictions.
+ auto ArgsWithoutDependentTypes =
+ Args.take_while([](Expr *Arg) { return !Arg->isTypeDependent(); });
+
+ SmallVector<ResultCandidate, 8> Results;
+ Expr *NakedFn = Fn->IgnoreParenCasts();
// Build an overload candidate set based on the functions we find.
SourceLocation Loc = Fn->getExprLoc();
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- SmallVector<ResultCandidate, 8> Results;
-
- Expr *NakedFn = Fn->IgnoreParenCasts();
- if (auto ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn))
- AddOverloadedCallCandidates(ULE, Args, CandidateSet,
+ if (auto ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn)) {
+ AddOverloadedCallCandidates(ULE, ArgsWithoutDependentTypes, CandidateSet,
/*PartialOverloading=*/true);
- else if (auto UME = dyn_cast<UnresolvedMemberExpr>(NakedFn)) {
+ } else if (auto UME = dyn_cast<UnresolvedMemberExpr>(NakedFn)) {
TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
if (UME->hasExplicitTemplateArgs()) {
UME->copyTemplateArgumentsInto(TemplateArgsBuffer);
@@ -5587,7 +5630,8 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
// Add the base as first argument (use a nullptr if the base is implicit).
SmallVector<Expr *, 12> ArgExprs(
1, UME->isImplicitAccess() ? nullptr : UME->getBase());
- ArgExprs.append(Args.begin(), Args.end());
+ ArgExprs.append(ArgsWithoutDependentTypes.begin(),
+ ArgsWithoutDependentTypes.end());
UnresolvedSet<8> Decls;
Decls.append(UME->decls_begin(), UME->decls_end());
const bool FirstArgumentIsBase = !UME->isImplicitAccess() && UME->getBase();
@@ -5606,7 +5650,7 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
Results.push_back(ResultCandidate(FD));
else
AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
- Args, CandidateSet,
+ ArgsWithoutDependentTypes, CandidateSet,
/*SuppressUserConversions=*/false,
/*PartialOverloading=*/true);
@@ -5621,7 +5665,8 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
LookupQualifiedName(R, DC);
R.suppressDiagnostics();
SmallVector<Expr *, 12> ArgExprs(1, NakedFn);
- ArgExprs.append(Args.begin(), Args.end());
+ ArgExprs.append(ArgsWithoutDependentTypes.begin(),
+ ArgsWithoutDependentTypes.end());
AddFunctionCandidates(R.asUnresolvedSet(), ArgExprs, CandidateSet,
/*ExplicitArgs=*/nullptr,
/*SuppressUserConversions=*/false,
@@ -5635,7 +5680,8 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
T = T->getPointeeType();
if (auto FP = T->getAs<FunctionProtoType>()) {
- if (!TooManyArguments(FP->getNumParams(), Args.size(),
+ if (!TooManyArguments(FP->getNumParams(),
+ ArgsWithoutDependentTypes.size(),
/*PartialOverloading=*/true) ||
FP->isVariadic())
Results.push_back(ResultCandidate(FP));
@@ -5644,7 +5690,7 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
Results.push_back(ResultCandidate(FT));
}
}
- mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
+ mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
QualType ParamType =
ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
return !CandidateSet.empty() ? ParamType : QualType();
@@ -5685,7 +5731,7 @@ QualType Sema::ProduceConstructorSignatureHelp(Scope *S, QualType Type,
}
SmallVector<ResultCandidate, 8> Results;
- mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
+ mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
return ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
}
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index ddd95faebe99..1ff7b1cdd515 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -1053,25 +1053,20 @@ ReturnTypeRequirement(TemplateParameterList *TPL) :
auto *Constraint =
cast_or_null<ConceptSpecializationExpr>(
TC->getImmediatelyDeclaredConstraint());
- bool Dependent = false;
- if (Constraint->getTemplateArgsAsWritten()) {
- for (auto &ArgLoc :
- Constraint->getTemplateArgsAsWritten()->arguments().drop_front(1)) {
- if (ArgLoc.getArgument().isDependent()) {
- Dependent = true;
- break;
- }
- }
- }
+ bool Dependent =
+ Constraint->getTemplateArgsAsWritten() &&
+ TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
+ Constraint->getTemplateArgsAsWritten()->arguments().drop_front(1));
TypeConstraintInfo.setInt(Dependent ? 1 : 0);
}
concepts::TypeRequirement::TypeRequirement(TypeSourceInfo *T) :
- Requirement(RK_Type, T->getType()->isDependentType(),
+ Requirement(RK_Type, T->getType()->isInstantiationDependentType(),
T->getType()->containsUnexpandedParameterPack(),
                // We reach this ctor with either dependent types (in which
                // case IsSatisfied doesn't matter) or with a non-dependent
                // type, in which case the existence of the type indicates
                // satisfaction.
- /*IsSatisfied=*/true
- ), Value(T),
- Status(T->getType()->isDependentType() ? SS_Dependent : SS_Satisfied) {}
+ /*IsSatisfied=*/true),
+ Value(T),
+ Status(T->getType()->isInstantiationDependentType() ? SS_Dependent
+ : SS_Satisfied) {}
diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp
index 992cccac6405..7a48bfa429e9 100644
--- a/clang/lib/Sema/SemaCoroutine.cpp
+++ b/clang/lib/Sema/SemaCoroutine.cpp
@@ -398,39 +398,54 @@ static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
diag::warn_coroutine_handle_address_invalid_return_type)
<< JustAddress->getType();
+  // Clean up temporary objects so that they don't live across suspension
+  // points unnecessarily. We choose to clean up before the call to
+  // __builtin_coro_resume so that the cleanup code is not inserted between
+  // the resume call and the return instruction, which would interfere with
+  // the musttail call contract.
+ JustAddress = S.MaybeCreateExprWithCleanups(JustAddress);
return buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_resume,
JustAddress);
}
/// Build calls to await_ready, await_suspend, and await_resume for a co_await
/// expression.
+/// The generated AST tries to clean up temporary objects as early as
+/// possible so that they don't live across suspension points unnecessarily.
+/// Temporary objects that live across suspension points can lead to a large
+/// coroutine frame and to memory corruption if the frame is destroyed after
+/// coming back from a suspension. This is done by wrapping both the
+/// await_ready call and the await_suspend call with ExprWithCleanups. At the
+/// end of this function, we also need to explicitly set the cleanup state so
+/// that the CoawaitExpr itself is wrapped with an ExprWithCleanups to clean
+/// up the awaiter associated with the co_await expression.
static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
SourceLocation Loc, Expr *E) {
OpaqueValueExpr *Operand = new (S.Context)
OpaqueValueExpr(Loc, E->getType(), VK_LValue, E->getObjectKind(), E);
- // Assume invalid until we see otherwise.
- ReadySuspendResumeResult Calls = {{}, Operand, /*IsInvalid=*/true};
-
- ExprResult CoroHandleRes = buildCoroutineHandle(S, CoroPromise->getType(), Loc);
- if (CoroHandleRes.isInvalid())
- return Calls;
- Expr *CoroHandle = CoroHandleRes.get();
+  // Assume valid until we see otherwise.
+  // Further operations are responsible for setting IsInvalid to true.
+ ReadySuspendResumeResult Calls = {{}, Operand, /*IsInvalid=*/false};
- const StringRef Funcs[] = {"await_ready", "await_suspend", "await_resume"};
- MultiExprArg Args[] = {None, CoroHandle, None};
- for (size_t I = 0, N = llvm::array_lengthof(Funcs); I != N; ++I) {
- ExprResult Result = buildMemberCall(S, Operand, Loc, Funcs[I], Args[I]);
- if (Result.isInvalid())
- return Calls;
- Calls.Results[I] = Result.get();
- }
+ using ACT = ReadySuspendResumeResult::AwaitCallType;
- // Assume the calls are valid; all further checking should make them invalid.
- Calls.IsInvalid = false;
+ auto BuildSubExpr = [&](ACT CallType, StringRef Func,
+ MultiExprArg Arg) -> Expr * {
+ ExprResult Result = buildMemberCall(S, Operand, Loc, Func, Arg);
+ if (Result.isInvalid()) {
+ Calls.IsInvalid = true;
+ return nullptr;
+ }
+ Calls.Results[CallType] = Result.get();
+ return Result.get();
+ };
- using ACT = ReadySuspendResumeResult::AwaitCallType;
- CallExpr *AwaitReady = cast<CallExpr>(Calls.Results[ACT::ACT_Ready]);
+ CallExpr *AwaitReady =
+ cast_or_null<CallExpr>(BuildSubExpr(ACT::ACT_Ready, "await_ready", None));
+ if (!AwaitReady)
+ return Calls;
if (!AwaitReady->getType()->isDependentType()) {
// [expr.await]p3 [...]
// — await-ready is the expression e.await_ready(), contextually converted
@@ -442,18 +457,36 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
S.Diag(Loc, diag::note_coroutine_promise_call_implicitly_required)
<< AwaitReady->getDirectCallee() << E->getSourceRange();
Calls.IsInvalid = true;
- }
- Calls.Results[ACT::ACT_Ready] = Conv.get();
+ } else
+ Calls.Results[ACT::ACT_Ready] = S.MaybeCreateExprWithCleanups(Conv.get());
+ }
+
+ ExprResult CoroHandleRes =
+ buildCoroutineHandle(S, CoroPromise->getType(), Loc);
+ if (CoroHandleRes.isInvalid()) {
+ Calls.IsInvalid = true;
+ return Calls;
}
- CallExpr *AwaitSuspend = cast<CallExpr>(Calls.Results[ACT::ACT_Suspend]);
+ Expr *CoroHandle = CoroHandleRes.get();
+ CallExpr *AwaitSuspend = cast_or_null<CallExpr>(
+ BuildSubExpr(ACT::ACT_Suspend, "await_suspend", CoroHandle));
+ if (!AwaitSuspend)
+ return Calls;
if (!AwaitSuspend->getType()->isDependentType()) {
// [expr.await]p3 [...]
// - await-suspend is the expression e.await_suspend(h), which shall be
- // a prvalue of type void or bool.
+ // a prvalue of type void, bool, or std::coroutine_handle<Z> for some
+ // type Z.
QualType RetType = AwaitSuspend->getCallReturnType(S.Context);
// Experimental support for coroutine_handle returning await_suspend.
- if (Expr *TailCallSuspend = maybeTailCall(S, RetType, AwaitSuspend, Loc))
+ if (Expr *TailCallSuspend =
+ maybeTailCall(S, RetType, AwaitSuspend, Loc))
+      // Note that we don't wrap the expression with ExprWithCleanups here
+      // because that might interfere with the musttail contract (e.g. by
+      // inserting cleanup instructions between the tail call and the return).
+      // Instead, the ExprWithCleanups is added within maybeTailCall() prior
+      // to the resume call.
Calls.Results[ACT::ACT_Suspend] = TailCallSuspend;
else {
// non-class prvalues always have cv-unqualified types
@@ -465,10 +498,17 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
S.Diag(Loc, diag::note_coroutine_promise_call_implicitly_required)
<< AwaitSuspend->getDirectCallee();
Calls.IsInvalid = true;
- }
+ } else
+ Calls.Results[ACT::ACT_Suspend] =
+ S.MaybeCreateExprWithCleanups(AwaitSuspend);
}
}
+ BuildSubExpr(ACT::ACT_Resume, "await_resume", None);
+
+ // Make sure the awaiter object gets a chance to be cleaned up.
+ S.Cleanup.setExprNeedsCleanups(true);
+
return Calls;
}
@@ -504,6 +544,7 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
auto *VD = VarDecl::Create(Context, FD, FD->getLocation(), FD->getLocation(),
&PP.getIdentifierTable().get("__promise"), T,
Context.getTrivialTypeSourceInfo(T, Loc), SC_None);
+ VD->setImplicit();
CheckVariableDeclarationType(VD);
if (VD->isInvalidDecl())
return nullptr;
@@ -865,8 +906,8 @@ ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *E,
SourceLocation CallLoc = E->getExprLoc();
// Build the await_ready, await_suspend, await_resume calls.
- ReadySuspendResumeResult RSS =
- buildCoawaitCalls(*this, Coroutine->CoroutinePromise, CallLoc, E);
+ ReadySuspendResumeResult RSS = buildCoawaitCalls(
+ *this, Coroutine->CoroutinePromise, CallLoc, E);
if (RSS.IsInvalid)
return ExprError();
@@ -920,8 +961,8 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
E = CreateMaterializeTemporaryExpr(E->getType(), E, true);
// Build the await_ready, await_suspend, await_resume calls.
- ReadySuspendResumeResult RSS =
- buildCoawaitCalls(*this, Coroutine->CoroutinePromise, Loc, E);
+ ReadySuspendResumeResult RSS = buildCoawaitCalls(
+ *this, Coroutine->CoroutinePromise, Loc, E);
if (RSS.IsInvalid)
return ExprError();
@@ -1537,6 +1578,7 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
S.Context, &FD, FD.getLocation(), FD.getLocation(),
&S.PP.getIdentifierTable().get("__coro_gro"), GroType,
S.Context.getTrivialTypeSourceInfo(GroType, Loc), SC_None);
+ GroDecl->setImplicit();
S.CheckVariableDeclarationType(GroDecl);
if (GroDecl->isInvalidDecl())
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 3e2b61ae8cdf..3ee0c43097d7 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -255,7 +255,7 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
// We found some types in dependent base classes. Recover as if the user
// wrote 'typename MyClass::II' instead of 'II'. We'll fully resolve the
// lookup during template instantiation.
- S.Diag(NameLoc, diag::ext_found_via_dependent_bases_lookup) << &II;
+ S.Diag(NameLoc, diag::ext_found_in_dependent_base) << &II;
ASTContext &Context = S.Context;
auto *NNS = NestedNameSpecifier::Create(Context, nullptr, false,
@@ -436,9 +436,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Res != ResEnd; ++Res) {
if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res) ||
(AllowDeducedTemplate && getAsTypeTemplateDecl(*Res))) {
- if (!IIDecl ||
- (*Res)->getLocation().getRawEncoding() <
- IIDecl->getLocation().getRawEncoding())
+ if (!IIDecl || (*Res)->getLocation() < IIDecl->getLocation())
IIDecl = *Res;
}
}
@@ -1184,23 +1182,20 @@ Corrected:
return ParsedType::make(T);
}
- // FIXME: This is context-dependent. We need to defer building the member
- // expression until the classification is consumed.
- if (FirstDecl->isCXXClassMember())
- return NameClassification::ContextIndependentExpr(
- BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result, nullptr,
- S));
-
// If we already know which single declaration is referenced, just annotate
- // that declaration directly.
+ // that declaration directly. Defer resolving even non-overloaded class
+ // member accesses, as we need to defer certain access checks until we know
+ // the context.
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
- if (Result.isSingleResult() && !ADL)
+ if (Result.isSingleResult() && !ADL && !FirstDecl->isCXXClassMember())
return NameClassification::NonType(Result.getRepresentativeDecl());
- // Build an UnresolvedLookupExpr. Note that this doesn't depend on the
- // context in which we performed classification, so it's safe to do now.
- return NameClassification::ContextIndependentExpr(
- BuildDeclarationNameExpr(SS, Result, ADL));
+ // Otherwise, this is an overload set that we will need to resolve later.
+ Result.suppressDiagnostics();
+ return NameClassification::OverloadSet(UnresolvedLookupExpr::Create(
+ Context, Result.getNamingClass(), SS.getWithLocInContext(Context),
+ Result.getLookupNameInfo(), ADL, Result.isOverloadedResult(),
+ Result.begin(), Result.end()));
}
ExprResult
@@ -1240,6 +1235,30 @@ ExprResult Sema::ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
return BuildDeclarationNameExpr(SS, Result, ADL);
}
+ExprResult Sema::ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *E) {
+ // For an implicit class member access, transform the result into a member
+ // access expression if necessary.
+ auto *ULE = cast<UnresolvedLookupExpr>(E);
+ if ((*ULE->decls_begin())->isCXXClassMember()) {
+ CXXScopeSpec SS;
+ SS.Adopt(ULE->getQualifierLoc());
+
+ // Reconstruct the lookup result.
+ LookupResult Result(*this, ULE->getName(), ULE->getNameLoc(),
+ LookupOrdinaryName);
+ Result.setNamingClass(ULE->getNamingClass());
+ for (auto I = ULE->decls_begin(), E = ULE->decls_end(); I != E; ++I)
+ Result.addDecl(*I, I.getAccess());
+ Result.resolveKind();
+ return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result,
+ nullptr, S);
+ }
+
+ // Otherwise, this is already in the form we needed, and no further checks
+ // are necessary.
+ return ULE;
+}
+
Sema::TemplateNameKindForDiagnostics
Sema::getTemplateNameKindForDiagnostics(TemplateName Name) {
auto *TD = Name.getAsTemplateDecl();
@@ -1457,10 +1476,7 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
// Out-of-line definitions shouldn't be pushed into scope in C++, unless they
// are function-local declarations.
- if (getLangOpts().CPlusPlus && D->isOutOfLine() &&
- !D->getDeclContext()->getRedeclContext()->Equals(
- D->getLexicalDeclContext()->getRedeclContext()) &&
- !D->getLexicalDeclContext()->isFunctionOrMethod())
+ if (getLangOpts().CPlusPlus && D->isOutOfLine() && !S->getFnParent())
return;
// Template instantiations should also not be pushed into scope.
@@ -1742,25 +1758,20 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (D->isInvalidDecl())
return false;
- bool Referenced = false;
if (auto *DD = dyn_cast<DecompositionDecl>(D)) {
// For a decomposition declaration, warn if none of the bindings are
// referenced, instead of if the variable itself is referenced (which
// it is, by the bindings' expressions).
- for (auto *BD : DD->bindings()) {
- if (BD->isReferenced()) {
- Referenced = true;
- break;
- }
- }
+ for (auto *BD : DD->bindings())
+ if (BD->isReferenced())
+ return false;
} else if (!D->getDeclName()) {
return false;
} else if (D->isReferenced() || D->isUsed()) {
- Referenced = true;
+ return false;
}
- if (Referenced || D->hasAttr<UnusedAttr>() ||
- D->hasAttr<ObjCPreciseLifetimeAttr>())
+ if (D->hasAttr<UnusedAttr>() || D->hasAttr<ObjCPreciseLifetimeAttr>())
return false;
if (isa<LabelDecl>(D))
@@ -1905,7 +1916,7 @@ static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
else
Diagnose = L->getStmt() == nullptr;
if (Diagnose)
- S.Diag(L->getLocation(), diag::err_undeclared_label_use) <<L->getDeclName();
+ S.Diag(L->getLocation(), diag::err_undeclared_label_use) << L;
}
void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
@@ -2019,24 +2030,6 @@ Scope *Sema::getNonFieldDeclScope(Scope *S) {
return S;
}
-/// Looks up the declaration of "struct objc_super" and
-/// saves it for later use in building builtin declaration of
-/// objc_msgSendSuper and objc_msgSendSuper_stret. If no such
-/// pre-existing declaration exists no action takes place.
-static void LookupPredefedObjCSuperType(Sema &ThisSema, Scope *S,
- IdentifierInfo *II) {
- if (!II->isStr("objc_msgSendSuper"))
- return;
- ASTContext &Context = ThisSema.Context;
-
- LookupResult Result(ThisSema, &Context.Idents.get("objc_super"),
- SourceLocation(), Sema::LookupTagName);
- ThisSema.LookupName(Result, S);
- if (Result.getResultKind() == LookupResult::Found)
- if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
- Context.setObjCSuperType(Context.getTagDeclType(TD));
-}
-
static StringRef getHeaderName(Builtin::Context &BuiltinInfo, unsigned ID,
ASTContext::GetBuiltinTypeError Error) {
switch (Error) {
@@ -2054,6 +2047,42 @@ static StringRef getHeaderName(Builtin::Context &BuiltinInfo, unsigned ID,
llvm_unreachable("unhandled error kind");
}
+FunctionDecl *Sema::CreateBuiltin(IdentifierInfo *II, QualType Type,
+ unsigned ID, SourceLocation Loc) {
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+
+ if (getLangOpts().CPlusPlus) {
+ LinkageSpecDecl *CLinkageDecl = LinkageSpecDecl::Create(
+ Context, Parent, Loc, Loc, LinkageSpecDecl::lang_c, false);
+ CLinkageDecl->setImplicit();
+ Parent->addDecl(CLinkageDecl);
+ Parent = CLinkageDecl;
+ }
+
+ FunctionDecl *New = FunctionDecl::Create(Context, Parent, Loc, Loc, II, Type,
+ /*TInfo=*/nullptr, SC_Extern, false,
+ Type->isFunctionProtoType());
+ New->setImplicit();
+ New->addAttr(BuiltinAttr::CreateImplicit(Context, ID));
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(Type)) {
+ SmallVector<ParmVarDecl *, 16> Params;
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ ParmVarDecl *parm = ParmVarDecl::Create(
+ Context, New, SourceLocation(), SourceLocation(), nullptr,
+ FT->getParamType(i), /*TInfo=*/nullptr, SC_None, nullptr);
+ parm->setScopeInfo(0, i);
+ Params.push_back(parm);
+ }
+ New->setParams(Params);
+ }
+
+ AddKnownFunctionAttributes(New);
+ return New;
+}
+
/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
/// file scope. Lazily create a decl for it. ForRedeclaration is true
/// if we're creating this built-in in anticipation of redeclaring the
@@ -2061,7 +2090,7 @@ static StringRef getHeaderName(Builtin::Context &BuiltinInfo, unsigned ID,
NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc) {
- LookupPredefedObjCSuperType(*this, S, II);
+ LookupNecessaryTypesForBuiltin(S, ID);
ASTContext::GetBuiltinTypeError Error;
QualType R = Context.GetBuiltinType(ID, Error);
@@ -2071,7 +2100,8 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
// If we have a builtin without an associated type we should not emit a
// warning when we were not able to find a type for it.
- if (Error == ASTContext::GE_Missing_type)
+ if (Error == ASTContext::GE_Missing_type ||
+ Context.BuiltinInfo.allowTypeMismatch(ID))
return nullptr;
// If we could not find a type for setjmp it is because the jmp_buf type was
@@ -2095,50 +2125,15 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
- if (Context.BuiltinInfo.getHeaderName(ID) &&
- !Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc))
+ if (const char *Header = Context.BuiltinInfo.getHeaderName(ID))
Diag(Loc, diag::note_include_header_or_declare)
- << Context.BuiltinInfo.getHeaderName(ID)
- << Context.BuiltinInfo.getName(ID);
+ << Header << Context.BuiltinInfo.getName(ID);
}
if (R.isNull())
return nullptr;
- DeclContext *Parent = Context.getTranslationUnitDecl();
- if (getLangOpts().CPlusPlus) {
- LinkageSpecDecl *CLinkageDecl =
- LinkageSpecDecl::Create(Context, Parent, Loc, Loc,
- LinkageSpecDecl::lang_c, false);
- CLinkageDecl->setImplicit();
- Parent->addDecl(CLinkageDecl);
- Parent = CLinkageDecl;
- }
-
- FunctionDecl *New = FunctionDecl::Create(Context,
- Parent,
- Loc, Loc, II, R, /*TInfo=*/nullptr,
- SC_Extern,
- false,
- R->isFunctionProtoType());
- New->setImplicit();
-
- // Create Decl objects for each parameter, adding them to the
- // FunctionDecl.
- if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
- SmallVector<ParmVarDecl*, 16> Params;
- for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
- ParmVarDecl *parm =
- ParmVarDecl::Create(Context, New, SourceLocation(), SourceLocation(),
- nullptr, FT->getParamType(i), /*TInfo=*/nullptr,
- SC_None, nullptr);
- parm->setScopeInfo(0, i);
- Params.push_back(parm);
- }
- New->setParams(Params);
- }
-
- AddKnownFunctionAttributes(New);
+ FunctionDecl *New = CreateBuiltin(II, R, ID, Loc);
RegisterLocallyScopedExternCDecl(New, S);
// TUScope is the translation-unit scope to insert this function into.
@@ -2146,7 +2141,7 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
// relate Scopes to DeclContexts, and probably eliminate CurContext
// entirely, but we're not there yet.
DeclContext *SavedContext = CurContext;
- CurContext = Parent;
+ CurContext = New->getDeclContext();
PushOnScopeChains(New, TUScope);
CurContext = SavedContext;
return New;
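
A sketch of the path through the new CreateBuiltin helper (illustrative,
not part of the patch): the first use of a builtin identifier makes
LazilyCreateBuiltin synthesize its declaration, and in C++ the declaration
is wrapped in an implicit extern "C" linkage spec and tagged with an
implicit BuiltinAttr.

    int main() {
      return __builtin_abs(-42);   // declaration synthesized lazily here
    }
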
@@ -2591,6 +2586,8 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
return false;
} else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
NewAttr = S.mergeMinSizeAttr(D, *MA);
+ else if (const auto *SNA = dyn_cast<SwiftNameAttr>(Attr))
+ NewAttr = S.mergeSwiftNameAttr(D, *SNA, SNA->getName());
else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
NewAttr = S.mergeOptimizeNoneAttr(D, *OA);
else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
@@ -2615,6 +2612,10 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeImportModuleAttr(D, *IMA);
else if (const auto *INA = dyn_cast<WebAssemblyImportNameAttr>(Attr))
NewAttr = S.mergeImportNameAttr(D, *INA);
+ else if (const auto *TCBA = dyn_cast<EnforceTCBAttr>(Attr))
+ NewAttr = S.mergeEnforceTCBAttr(D, *TCBA);
+ else if (const auto *TCBLA = dyn_cast<EnforceTCBLeafAttr>(Attr))
+ NewAttr = S.mergeEnforceTCBLeafAttr(D, *TCBLA);
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -2638,8 +2639,11 @@ static const NamedDecl *getDefinition(const Decl *D) {
return Def;
return VD->getActingDefinition();
}
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- return FD->getDefinition();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ const FunctionDecl *Def = nullptr;
+ if (FD->isDefined(Def, true))
+ return Def;
+ }
return nullptr;
}
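
A hedged sketch of what the isDefined(Def, true) form finds that
getDefinition() alone did not, assuming the second parameter enables the
pending-friend check (illustrative, not part of the patch):

    template <typename T> struct Holder {
      friend void hook() {}   // a definition for redeclaration purposes,
    };                        // even before Holder is instantiated
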
@@ -3234,6 +3238,10 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
}
}
+ // If the old declaration was found in an inline namespace and the new
+ // declaration was qualified, update the DeclContext to match.
+ adjustDeclContextForDeclaratorDecl(New, Old);
+
// If the old declaration is invalid, just give up here.
if (Old->isInvalidDecl())
return true;
@@ -3348,7 +3356,10 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// there but not here.
NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
RequiresAdjustment = true;
- } else if (New->getBuiltinID()) {
+ } else if (Old->getBuiltinID()) {
+    // The Builtin attribute isn't propagated to the new declaration yet at
+    // this point, so check whether the old declaration is a builtin.
+
// Calling Conventions on a Builtin aren't really useful and setting a
// default calling convention and cdecl'ing some builtin redeclarations is
// common, so warn and ignore the calling convention on the redeclaration.
@@ -3781,18 +3792,6 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New;
Diag(OldLocation, diag::note_previous_builtin_declaration)
<< Old << Old->getType();
-
- // If this is a global redeclaration, just forget hereafter
- // about the "builtin-ness" of the function.
- //
- // Doing this for local extern declarations is problematic. If
- // the builtin declaration remains visible, a second invalid
- // local declaration will produce a hard error; if it doesn't
- // remain visible, a single bogus local redeclaration (which is
- // actually only a warning) could break all the downstream code.
- if (!New->getLexicalDeclContext()->isFunctionOrMethod())
- New->getIdentifier()->revertBuiltin();
-
return false;
}
@@ -4057,6 +4056,10 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
return New->setInvalidDecl();
}
+ // If the old declaration was found in an inline namespace and the new
+ // declaration was qualified, update the DeclContext to match.
+ adjustDeclContextForDeclaratorDecl(New, Old);
+
// Ensure the template parameters are compatible.
if (NewTemplate &&
!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
@@ -4241,7 +4244,6 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
New->setPreviousDecl(Old);
if (NewTemplate)
NewTemplate->setPreviousDecl(OldTemplate);
- adjustDeclContextForDeclaratorDecl(New, Old);
// Inherit access appropriately.
New->setAccess(Old->getAccess());
@@ -4606,10 +4608,10 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
if (Tag)
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
<< GetDiagnosticTypeSpecifierID(DS.getTypeSpecType())
- << DS.getConstexprSpecifier();
+ << static_cast<int>(DS.getConstexprSpecifier());
else
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_wrong_decl_kind)
- << DS.getConstexprSpecifier();
+ << static_cast<int>(DS.getConstexprSpecifier());
// Don't emit warnings after this error.
return TagD;
}
@@ -4745,7 +4747,10 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
if (!DeclaresAnything) {
// In C, we allow this as a (popular) extension / bug. Don't bother
// producing further diagnostics for redundant qualifiers after this.
- Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
+ Diag(DS.getBeginLoc(), (IsExplicitInstantiation || !TemplateParams.empty())
+ ? diag::err_no_declarators
+ : diag::ext_no_declarators)
+ << DS.getSourceRange();
return TagD;
}
@@ -5164,7 +5169,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
// Mock up a declarator.
- Declarator Dc(DS, DeclaratorContext::MemberContext);
+ Declarator Dc(DS, DeclaratorContext::Member);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
@@ -5261,7 +5266,7 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
assert(Record && "expected a record!");
// Mock up a declarator.
- Declarator Dc(DS, DeclaratorContext::TypeNameContext);
+ Declarator Dc(DS, DeclaratorContext::TypeName);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct");
@@ -5346,8 +5351,8 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
case UnqualifiedIdKind::IK_OperatorFunctionId:
NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator));
- NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc
- = Name.OperatorFunctionId.SymbolLocations[0];
+ NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc =
+ Name.OperatorFunctionId.SymbolLocations[0].getRawEncoding();
NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
= Name.EndLocation.getRawEncoding();
return NameInfo;
@@ -5495,7 +5500,7 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
// Grab the type from the parser.
TypeSourceInfo *TSI = nullptr;
QualType T = S.GetTypeFromParser(DS.getRepAsType(), &TSI);
- if (T.isNull() || !T->isDependentType()) break;
+ if (T.isNull() || !T->isInstantiationDependentType()) break;
// Make sure there's a type source info. This isn't really much
// of a waste; most dependent types should have type source info
@@ -5547,7 +5552,7 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
}
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
- D.setFunctionDefinitionKind(FDK_Declaration);
+ D.setFunctionDefinitionKind(FunctionDefinitionKind::Declaration);
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
@@ -5936,9 +5941,14 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T);
if (!VLATy)
return QualType();
- // FIXME: We should probably handle this case
- if (VLATy->getElementType()->isVariablyModifiedType())
- return QualType();
+
+ QualType ElemTy = VLATy->getElementType();
+ if (ElemTy->isVariablyModifiedType()) {
+ ElemTy = TryToFixInvalidVariablyModifiedType(ElemTy, Context,
+ SizeIsNegative, Oversized);
+ if (ElemTy.isNull())
+ return QualType();
+ }
Expr::EvalResult Result;
if (!VLATy->getSizeExpr() ||
@@ -5954,16 +5964,19 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
}
// Check whether the array is too large to be addressed.
- unsigned ActiveSizeBits
- = ConstantArrayType::getNumAddressingBits(Context, VLATy->getElementType(),
- Res);
+ unsigned ActiveSizeBits =
+ (!ElemTy->isDependentType() && !ElemTy->isVariablyModifiedType() &&
+ !ElemTy->isIncompleteType() && !ElemTy->isUndeducedType())
+ ? ConstantArrayType::getNumAddressingBits(Context, ElemTy, Res)
+ : Res.getActiveBits();
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
Oversized = Res;
return QualType();
}
- return Context.getConstantArrayType(
- VLATy->getElementType(), Res, VLATy->getSizeExpr(), ArrayType::Normal, 0);
+ QualType FoldedArrayType = Context.getConstantArrayType(
+ ElemTy, Res, VLATy->getSizeExpr(), ArrayType::Normal, 0);
+ return Qs.apply(Context, FoldedArrayType);
}
static void
@@ -5989,7 +6002,13 @@ FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) {
ArrayTypeLoc DstATL = DstTL.castAs<ArrayTypeLoc>();
TypeLoc SrcElemTL = SrcATL.getElementLoc();
TypeLoc DstElemTL = DstATL.getElementLoc();
- DstElemTL.initializeFullCopy(SrcElemTL);
+ if (VariableArrayTypeLoc SrcElemATL =
+ SrcElemTL.getAs<VariableArrayTypeLoc>()) {
+ ConstantArrayTypeLoc DstElemATL = DstElemTL.castAs<ConstantArrayTypeLoc>();
+ FixInvalidVariablyModifiedTypeLoc(SrcElemATL, DstElemATL);
+ } else {
+ DstElemTL.initializeFullCopy(SrcElemTL);
+ }
DstATL.setLBracketLoc(SrcATL.getLBracketLoc());
DstATL.setSizeExpr(SrcATL.getSizeExpr());
DstATL.setRBracketLoc(SrcATL.getRBracketLoc());
@@ -6014,6 +6033,31 @@ TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
return FixedTInfo;
}
+/// Attempt to fold a variable-sized type to a constant-sized type, returning
+/// true if we were successful.
+static bool tryToFixVariablyModifiedVarType(Sema &S, TypeSourceInfo *&TInfo,
+ QualType &T, SourceLocation Loc,
+ unsigned FailedFoldDiagID) {
+ bool SizeIsNegative;
+ llvm::APSInt Oversized;
+ TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(
+ TInfo, S.Context, SizeIsNegative, Oversized);
+ if (FixedTInfo) {
+ S.Diag(Loc, diag::ext_vla_folded_to_constant);
+ TInfo = FixedTInfo;
+ T = FixedTInfo->getType();
+ return true;
+ }
+
+ if (SizeIsNegative)
+ S.Diag(Loc, diag::err_typecheck_negative_array_size);
+ else if (Oversized.getBoolValue())
+ S.Diag(Loc, diag::err_array_too_large) << Oversized.toString(10);
+ else if (FailedFoldDiagID)
+ S.Diag(Loc, FailedFoldDiagID);
+ return false;
+}
+
/// Register the given locally-scoped extern "C" declaration so
/// that it can be found later for redeclarations. We include any extern "C"
/// declaration that is not visible in the translation unit here, not just
@@ -6073,7 +6117,7 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
<< getLangOpts().CPlusPlus17;
if (D.getDeclSpec().hasConstexprSpecifier())
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
- << 1 << D.getDeclSpec().getConstexprSpecifier();
+ << 1 << static_cast<int>(D.getDeclSpec().getConstexprSpecifier());
if (D.getName().Kind != UnqualifiedIdKind::IK_Identifier) {
if (D.getName().Kind == UnqualifiedIdKind::IK_DeductionGuideName)
@@ -6119,7 +6163,7 @@ Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
SizeIsNegative,
Oversized);
if (FixedTInfo) {
- Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size);
+ Diag(NewTD->getLocation(), diag::ext_vla_folded_to_constant);
NewTD->setTypeSourceInfo(FixedTInfo);
} else {
if (SizeIsNegative)
@@ -6501,7 +6545,7 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
// special MSVC extension: in the last case, the declaration is treated as if
// it were marked dllexport.
bool IsInline = false, IsStaticDataMember = false, IsQualifiedFriend = false;
- bool IsMicrosoft = S.Context.getTargetInfo().getCXXABI().isMicrosoft();
+ bool IsMicrosoftABI = S.Context.getTargetInfo().shouldDLLImportComdatSymbols();
if (const auto *VD = dyn_cast<VarDecl>(NewDecl)) {
// Ignore static data because out-of-line definitions are diagnosed
// separately.
@@ -6515,9 +6559,9 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
}
if (OldImportAttr && !HasNewAttr &&
- (!IsInline || (IsMicrosoft && IsTemplate)) && !IsStaticDataMember &&
+ (!IsInline || (IsMicrosoftABI && IsTemplate)) && !IsStaticDataMember &&
!NewDecl->isLocalExternDecl() && !IsQualifiedFriend) {
- if (IsMicrosoft && IsDefinition) {
+ if (IsMicrosoftABI && IsDefinition) {
S.Diag(NewDecl->getLocation(),
diag::warn_redeclaration_without_import_attribute)
<< NewDecl;
@@ -6534,7 +6578,7 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
OldDecl->dropAttr<DLLImportAttr>();
NewDecl->dropAttr<DLLImportAttr>();
}
- } else if (IsInline && OldImportAttr && !IsMicrosoft) {
+ } else if (IsInline && OldImportAttr && !IsMicrosoftABI) {
// In MinGW, seeing a function declared inline drops the dllimport
// attribute.
OldDecl->dropAttr<DLLImportAttr>();
@@ -6713,14 +6757,16 @@ static bool diagnoseOpenCLTypes(Scope *S, Sema &Se, Declarator &D,
}
// OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
- QualType NR = R;
- while (NR->isPointerType()) {
- if (NR->isFunctionPointerType()) {
- Se.Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer);
- D.setInvalidType();
- return false;
+ if (!Se.getOpenCLOptions().isEnabled("__cl_clang_function_pointers")) {
+ QualType NR = R;
+ while (NR->isPointerType() || NR->isMemberFunctionPointerType()) {
+ if (NR->isFunctionPointerType() || NR->isMemberFunctionPointerType()) {
+ Se.Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer);
+ D.setInvalidType();
+ return false;
+ }
+ NR = NR->getPointeeType();
}
- NR = NR->getPointeeType();
}
if (!Se.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
@@ -6852,6 +6898,12 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
+  // If this variable has a variably-modified type and an initializer, try to
+ // fold to a constant-sized type. This is otherwise invalid.
+ if (D.hasInitializer() && R->isVariablyModifiedType())
+ tryToFixVariablyModifiedVarType(*this, TInfo, R, D.getIdentifierLoc(),
+                                    /*FailedFoldDiagID=*/0);
+
bool IsMemberSpecialization = false;
bool IsVariableTemplateSpecialization = false;
bool IsPartialSpecialization = false;
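
A sketch of the case the new call covers, in C, where a const variable is
not an integer constant expression (illustrative, not part of the patch):

    void f(void) {
      const int n = 4;
      int a[n] = {1, 2, 3, 4};   /* variably-modified type with an
                                    initializer; the size folds, so the
                                    type is recovered as 'int[4]' with
                                    ext_vla_folded_to_constant */
    }
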
@@ -6970,19 +7022,18 @@ NamedDecl *Sema::ActOnVariableDeclarator(
TemplateParams->getRAngleLoc());
TemplateParams = nullptr;
} else {
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ return nullptr;
+
if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
// This is an explicit specialization or a partial specialization.
- // FIXME: Check that we can declare a specialization here.
IsVariableTemplateSpecialization = true;
IsPartialSpecialization = TemplateParams->size() > 0;
} else { // if (TemplateParams->size() > 0)
// This is a template declaration.
IsVariableTemplate = true;
- // Check that we can declare a template here.
- if (CheckTemplateDeclScope(S, TemplateParams))
- return nullptr;
-
// Only C++1y supports variable templates (N3651).
Diag(D.getIdentifierLoc(),
getLangOpts().CPlusPlus14
@@ -6991,6 +7042,10 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
} else {
+ // Check that we can declare a member specialization here.
+ if (!TemplateParamLists.empty() && IsMemberSpecialization &&
+ CheckTemplateDeclScope(S, TemplateParamLists.back()))
+ return nullptr;
assert((Invalid ||
D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) &&
"should have a 'template<>' for this decl");
@@ -7110,16 +7165,16 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
switch (D.getDeclSpec().getConstexprSpecifier()) {
- case CSK_unspecified:
+ case ConstexprSpecKind::Unspecified:
break;
- case CSK_consteval:
+ case ConstexprSpecKind::Consteval:
Diag(D.getDeclSpec().getConstexprSpecLoc(),
- diag::err_constexpr_wrong_decl_kind)
- << D.getDeclSpec().getConstexprSpecifier();
+ diag::err_constexpr_wrong_decl_kind)
+ << static_cast<int>(D.getDeclSpec().getConstexprSpecifier());
LLVM_FALLTHROUGH;
- case CSK_constexpr:
+ case ConstexprSpecKind::Constexpr:
NewVD->setConstexpr(true);
MaybeAddCUDAConstantAttr(NewVD);
// C++1z [dcl.spec.constexpr]p1:
@@ -7131,7 +7186,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
NewVD->setImplicitlyInline();
break;
- case CSK_constinit:
+ case ConstexprSpecKind::Constinit:
if (!NewVD->hasGlobalStorage())
Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_constinit_local_variable);
@@ -7173,9 +7228,10 @@ NamedDecl *Sema::ActOnVariableDeclarator(
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
else if (NewVD->hasLocalStorage())
Diag(NewVD->getLocation(), diag::err_module_private_local)
- << 0 << NewVD->getDeclName()
- << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
- << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ << 0 << NewVD
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getModulePrivateSpecLoc());
else {
NewVD->setModulePrivate();
if (NewTemplate)
@@ -7982,7 +8038,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
- Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
+ Diag(NewVD->getLocation(), diag::ext_vla_folded_to_constant);
NewVD->setType(FixedT);
NewVD->setTypeSourceInfo(FixedTInfo);
}
@@ -8022,6 +8078,14 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
NewVD->setInvalidDecl();
return;
}
+
+ // PPC MMA non-pointer types are not allowed as non-local variable types.
+ if (Context.getTargetInfo().getTriple().isPPC64() &&
+ !NewVD->isLocalVarDecl() &&
+ CheckPPCMMAType(T, NewVD->getLocation())) {
+ NewVD->setInvalidDecl();
+ return;
+ }
}
/// Perform semantic checking on a newly-created variable
@@ -8056,73 +8120,54 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
return false;
}
-namespace {
-struct FindOverriddenMethod {
- Sema *S;
- CXXMethodDecl *Method;
-
- /// Member lookup function that determines whether a given C++
- /// method overrides a method in a base class, to be used with
- /// CXXRecordDecl::lookupInBases().
- bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
+/// AddOverriddenMethods - See if a method overrides any in the base classes,
+/// and if so, check that it's a valid override and remember it.
+bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
+ llvm::SmallPtrSet<const CXXMethodDecl*, 4> Overridden;
- DeclarationName Name = Method->getDeclName();
+ // Look for methods in base classes that this method might override.
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+ auto VisitBase = [&] (const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ CXXRecordDecl *BaseRecord = Specifier->getType()->getAsCXXRecordDecl();
+ DeclarationName Name = MD->getDeclName();
- // FIXME: Do we care about other names here too?
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// We really want to find the base class destructor here.
- QualType T = S->Context.getTypeDeclType(BaseRecord);
- CanQualType CT = S->Context.getCanonicalType(T);
-
- Name = S->Context.DeclarationNames.getCXXDestructorName(CT);
- }
-
- for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- NamedDecl *D = Path.Decls.front();
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
- if (MD->isVirtual() &&
- !S->IsOverload(
- Method, MD, /*UseMemberUsingDeclRules=*/false,
- /*ConsiderCudaAttrs=*/true,
- // C++2a [class.virtual]p2 does not consider requires clauses
- // when overriding.
- /*ConsiderRequiresClauses=*/false))
- return true;
+ QualType T = Context.getTypeDeclType(BaseRecord);
+ CanQualType CT = Context.getCanonicalType(T);
+ Name = Context.DeclarationNames.getCXXDestructorName(CT);
+ }
+
+ for (NamedDecl *BaseND : BaseRecord->lookup(Name)) {
+ CXXMethodDecl *BaseMD =
+ dyn_cast<CXXMethodDecl>(BaseND->getCanonicalDecl());
+ if (!BaseMD || !BaseMD->isVirtual() ||
+ IsOverload(MD, BaseMD, /*UseMemberUsingDeclRules=*/false,
+ /*ConsiderCudaAttrs=*/true,
+ // C++2a [class.virtual]p2 does not consider requires
+ // clauses when overriding.
+ /*ConsiderRequiresClauses=*/false))
+ continue;
+
+ if (Overridden.insert(BaseMD).second) {
+ MD->addOverriddenMethod(BaseMD);
+ CheckOverridingFunctionReturnType(MD, BaseMD);
+ CheckOverridingFunctionAttributes(MD, BaseMD);
+ CheckOverridingFunctionExceptionSpec(MD, BaseMD);
+ CheckIfOverriddenFunctionIsMarkedFinal(MD, BaseMD);
}
+
+ // A method can only override one function from each base class. We
+ // don't track indirectly overridden methods from bases of bases.
+ return true;
}
return false;
- }
-};
-} // end anonymous namespace
-
-/// AddOverriddenMethods - See if a method overrides any in the base classes,
-/// and if so, check that it's a valid override and remember it.
-bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
- // Look for methods in base classes that this method might override.
- CXXBasePaths Paths;
- FindOverriddenMethod FOM;
- FOM.Method = MD;
- FOM.S = this;
- bool AddedAny = false;
- if (DC->lookupInBases(FOM, Paths)) {
- for (auto *I : Paths.found_decls()) {
- if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(I)) {
- MD->addOverriddenMethod(OldMD->getCanonicalDecl());
- if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
- !CheckOverridingFunctionAttributes(MD, OldMD) &&
- !CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
- !CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
- AddedAny = true;
- }
- }
- }
- }
+ };
- return AddedAny;
+ DC->lookupInBases(VisitBase, Paths);
+ return !Overridden.empty();
}
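
A sketch of the inputs the rewritten walk visits (illustrative, not part
of the patch): each base is searched for the method's name, destructors
are matched through the base destructor's name, and every virtual
non-overload found is recorded as overridden and checked.

    struct Base {
      virtual void f(int);
      virtual ~Base();
    };
    struct Derived : Base {
      void f(int) override;   // found via lookup of 'f' in Base
      ~Derived() override;    // matched via Base's destructor name
    };
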
namespace {
@@ -8401,7 +8446,7 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
NewFD = FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
R, TInfo, SC, isInline, HasPrototype,
- CSK_unspecified,
+ ConstexprSpecKind::Unspecified,
/*TrailingRequiresClause=*/nullptr);
if (D.isInvalidType())
NewFD->setInvalidDecl();
@@ -8412,11 +8457,11 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
ExplicitSpecifier ExplicitSpecifier = D.getDeclSpec().getExplicitSpecifier();
ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
- if (ConstexprKind == CSK_constinit) {
+ if (ConstexprKind == ConstexprSpecKind::Constinit) {
SemaRef.Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_constexpr_wrong_decl_kind)
- << ConstexprKind;
- ConstexprKind = CSK_unspecified;
+ << static_cast<int>(ConstexprKind);
+ ConstexprKind = ConstexprSpecKind::Unspecified;
D.getMutableDeclSpec().ClearConstexprSpec();
}
Expr *TrailingRequiresClause = D.getTrailingRequiresClause();
@@ -8570,12 +8615,21 @@ static bool isOpenCLSizeDependentType(ASTContext &C, QualType Ty) {
static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
if (PT->isPointerType()) {
QualType PointeeType = PT->getPointeeType();
- if (PointeeType->isPointerType())
- return PtrPtrKernelParam;
if (PointeeType.getAddressSpace() == LangAS::opencl_generic ||
PointeeType.getAddressSpace() == LangAS::opencl_private ||
PointeeType.getAddressSpace() == LangAS::Default)
return InvalidAddrSpacePtrKernelParam;
+
+ if (PointeeType->isPointerType()) {
+ // This is a pointer to pointer parameter.
+ // Recursively check inner type.
+ OpenCLParamType ParamKind = getOpenCLKernelParameterType(S, PointeeType);
+ if (ParamKind == InvalidAddrSpacePtrKernelParam ||
+ ParamKind == InvalidKernelParam)
+ return ParamKind;
+
+ return PtrPtrKernelParam;
+ }
return PtrKernelParam;
}
@@ -8628,11 +8682,17 @@ static void checkIsValidOpenCLKernelParameter(
switch (getOpenCLKernelParameterType(S, PT)) {
case PtrPtrKernelParam:
- // OpenCL v1.2 s6.9.a:
- // A kernel function argument cannot be declared as a
- // pointer to a pointer type.
- S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
- D.setInvalidType();
+ // OpenCL v3.0 s6.11.a:
+ // A kernel function argument cannot be declared as a pointer to a pointer
+ // type. [...] This restriction only applies to OpenCL C 1.2 or below.
+ if (S.getLangOpts().OpenCLVersion < 120 &&
+ !S.getLangOpts().OpenCLCPlusPlus) {
+ S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
+ D.setInvalidType();
+ return;
+ }
+
+ ValidTypes.insert(PT.getTypePtr());
return;
case InvalidAddrSpacePtrKernelParam:
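
A sketch in OpenCL C, the mode this switch validates (illustrative, not
part of the patch; assumes -cl-std selects the language version):

    kernel void k(global int * global *p) { }
    // rejected with err_opencl_ptrptr_kernel_param before OpenCL C 1.2;
    // accepted from 1.2 on, with the inner pointer's address space still
    // checked by the recursive call above
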
@@ -8916,13 +8976,13 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TemplateParamLists, isFriend, isMemberSpecialization,
Invalid);
if (TemplateParams) {
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ NewFD->setInvalidDecl();
+
if (TemplateParams->size() > 0) {
// This is a function template
- // Check that we can declare a template here.
- if (CheckTemplateDeclScope(S, TemplateParams))
- NewFD->setInvalidDecl();
-
// A destructor cannot be a template.
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
Diag(NewFD->getLocation(), diag::err_destructor_template);
@@ -8981,6 +9041,11 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
} else {
+ // Check that we can declare a template here.
+ if (!TemplateParamLists.empty() && isMemberSpecialization &&
+ CheckTemplateDeclScope(S, TemplateParamLists.back()))
+ NewFD->setInvalidDecl();
+
// All template param lists were matched against the scope specifier:
// this is NOT (an explicit specialization of) a template.
if (TemplateParamLists.size() > 0)
@@ -9075,8 +9140,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
- if (ConstexprSpecKind ConstexprKind =
- D.getDeclSpec().getConstexprSpecifier()) {
+ ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
+ if (ConstexprKind != ConstexprSpecKind::Unspecified) {
// C++11 [dcl.constexpr]p2: constexpr functions and constexpr constructors
// are implicitly inline.
NewFD->setImplicitlyInline();
@@ -9085,15 +9150,18 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// be either constructors or to return a literal type. Therefore,
// destructors cannot be declared constexpr.
if (isa<CXXDestructorDecl>(NewFD) &&
- (!getLangOpts().CPlusPlus20 || ConstexprKind == CSK_consteval)) {
+ (!getLangOpts().CPlusPlus20 ||
+ ConstexprKind == ConstexprSpecKind::Consteval)) {
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor)
- << ConstexprKind;
- NewFD->setConstexprKind(getLangOpts().CPlusPlus20 ? CSK_unspecified : CSK_constexpr);
+ << static_cast<int>(ConstexprKind);
+ NewFD->setConstexprKind(getLangOpts().CPlusPlus20
+ ? ConstexprSpecKind::Unspecified
+ : ConstexprSpecKind::Constexpr);
}
// C++20 [dcl.constexpr]p2: An allocation function, or a
// deallocation function shall not be declared with the consteval
// specifier.
- if (ConstexprKind == CSK_consteval &&
+ if (ConstexprKind == ConstexprSpecKind::Consteval &&
(NewFD->getOverloadedOperator() == OO_New ||
NewFD->getOverloadedOperator() == OO_Array_New ||
NewFD->getOverloadedOperator() == OO_Delete ||
@@ -9101,7 +9169,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_invalid_consteval_decl_kind)
<< NewFD;
- NewFD->setConstexprKind(CSK_constexpr);
+ NewFD->setConstexprKind(ConstexprSpecKind::Constexpr);
}
}
@@ -9132,17 +9200,17 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// If a function is defined as defaulted or deleted, mark it as such now.
// We'll do the relevant checks on defaulted / deleted functions later.
switch (D.getFunctionDefinitionKind()) {
- case FDK_Declaration:
- case FDK_Definition:
- break;
+ case FunctionDefinitionKind::Declaration:
+ case FunctionDefinitionKind::Definition:
+ break;
- case FDK_Defaulted:
- NewFD->setDefaulted();
- break;
+ case FunctionDefinitionKind::Defaulted:
+ NewFD->setDefaulted();
+ break;
- case FDK_Deleted:
- NewFD->setDeletedAsWritten();
- break;
+ case FunctionDefinitionKind::Deleted:
+ NewFD->setDeletedAsWritten();
+ break;
}
if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
@@ -9459,12 +9527,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// that either the specialized function type or the specialized
// template is dependent, and therefore matching will fail. In
// this case, don't check the specialization yet.
- bool InstantiationDependent = false;
if (isFunctionTemplateSpecialization && isFriend &&
(NewFD->getType()->isDependentType() || DC->isDependentContext() ||
- TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs,
- InstantiationDependent))) {
+ TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
+ TemplateArgs.arguments()))) {
assert(HasExplicitTemplateArgs &&
"friend function specialization without template args");
if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
@@ -9636,6 +9702,36 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
+ // If this is the first declaration of a library builtin function, add
+ // attributes as appropriate.
+ if (!D.isRedeclaration() &&
+ NewFD->getDeclContext()->getRedeclContext()->isFileContext()) {
+ if (IdentifierInfo *II = Previous.getLookupName().getAsIdentifierInfo()) {
+ if (unsigned BuiltinID = II->getBuiltinID()) {
+ if (NewFD->getLanguageLinkage() == CLanguageLinkage) {
+          // Validate that the type matches, unless this builtin is
+          // specified as matching regardless of its declared type.
+ if (Context.BuiltinInfo.allowTypeMismatch(BuiltinID)) {
+ NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
+ } else {
+ ASTContext::GetBuiltinTypeError Error;
+ LookupNecessaryTypesForBuiltin(S, BuiltinID);
+ QualType BuiltinType = Context.GetBuiltinType(BuiltinID, Error);
+
+ if (!Error && !BuiltinType.isNull() &&
+ Context.hasSameFunctionTypeIgnoringExceptionSpec(
+ NewFD->getType(), BuiltinType))
+ NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
+ }
+ } else if (BuiltinID == Builtin::BI__GetExceptionInfo &&
+ Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // FIXME: We should consider this a builtin only in the std namespace.
+ NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
+ }
+ }
+ }
+ }
+
ProcessPragmaWeak(S, NewFD);
checkAttributesAfterMerging(*this, *NewFD);
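
A sketch of a first declaration the new block recognizes (illustrative,
not part of the patch): a file-scope redeclaration of a library builtin
with C language linkage and a matching type receives an implicit
BuiltinAttr.

    #include <cstddef>
    extern "C" void *memcpy(void *dst, const void *src, std::size_t n);
    // same type as the builtin, so the declaration is marked with an
    // implicit BuiltinAttr
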
@@ -9813,17 +9909,17 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// because Sema::ActOnStartOfFunctionDef has not been called yet.
if (const auto *NBA = NewFD->getAttr<NoBuiltinAttr>())
switch (D.getFunctionDefinitionKind()) {
- case FDK_Defaulted:
- case FDK_Deleted:
+ case FunctionDefinitionKind::Defaulted:
+ case FunctionDefinitionKind::Deleted:
Diag(NBA->getLocation(),
diag::err_attribute_no_builtin_on_defaulted_deleted_function)
<< NBA->getSpelling();
break;
- case FDK_Declaration:
+ case FunctionDefinitionKind::Declaration:
Diag(NBA->getLocation(), diag::err_attribute_no_builtin_on_non_definition)
<< NBA->getSpelling();
break;
- case FDK_Definition:
+ case FunctionDefinitionKind::Definition:
break;
}
@@ -9996,31 +10092,50 @@ static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
// multiversion functions.
static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
MultiVersionKind MVType) {
+ // Note: this list/diagnosis must match the list in
+ // checkMultiversionAttributesAllSame.
switch (Kind) {
default:
return false;
case attr::Used:
return MVType == MultiVersionKind::Target;
+ case attr::NonNull:
+ case attr::NoThrow:
+ return true;
}
}
-static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
- MultiVersionKind MVType) {
+static bool checkNonMultiVersionCompatAttributes(Sema &S,
+ const FunctionDecl *FD,
+ const FunctionDecl *CausedFD,
+ MultiVersionKind MVType) {
+ bool IsCPUSpecificCPUDispatchMVType =
+ MVType == MultiVersionKind::CPUDispatch ||
+ MVType == MultiVersionKind::CPUSpecific;
+ const auto Diagnose = [FD, CausedFD, IsCPUSpecificCPUDispatchMVType](
+ Sema &S, const Attr *A) {
+ S.Diag(FD->getLocation(), diag::err_multiversion_disallowed_other_attr)
+ << IsCPUSpecificCPUDispatchMVType << A;
+ if (CausedFD)
+ S.Diag(CausedFD->getLocation(), diag::note_multiversioning_caused_here);
+ return true;
+ };
+
for (const Attr *A : FD->attrs()) {
switch (A->getKind()) {
case attr::CPUDispatch:
case attr::CPUSpecific:
if (MVType != MultiVersionKind::CPUDispatch &&
MVType != MultiVersionKind::CPUSpecific)
- return true;
+ return Diagnose(S, A);
break;
case attr::Target:
if (MVType != MultiVersionKind::Target)
- return true;
+ return Diagnose(S, A);
break;
default:
if (!AttrCompatibleWithMultiVersion(A->getKind(), MVType))
- return true;
+ return Diagnose(S, A);
break;
}
}
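
A sketch of the relaxed attribute rule (illustrative, not part of the
patch; assumes an x86 target for the target attribute strings):

    __attribute__((nonnull)) __attribute__((target("default")))
    int read(int *p);
    __attribute__((nonnull)) __attribute__((target("sse4.2")))
    int read(int *p);   // OK now: nonnull is multiversion-compatible;
                        // an incompatible attribute would produce
                        // err_multiversion_disallowed_other_attr
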
@@ -10154,18 +10269,12 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
MVType == MultiVersionKind::CPUDispatch ||
MVType == MultiVersionKind::CPUSpecific;
- // For now, disallow all other attributes. These should be opt-in, but
- // an analysis of all of them is a future FIXME.
- if (CausesMV && OldFD && HasNonMultiVersionAttributes(OldFD, MVType)) {
- S.Diag(OldFD->getLocation(), diag::err_multiversion_no_other_attrs)
- << IsCPUSpecificCPUDispatchMVType;
- S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
+ if (CausesMV && OldFD &&
+ checkNonMultiVersionCompatAttributes(S, OldFD, NewFD, MVType))
return true;
- }
- if (HasNonMultiVersionAttributes(NewFD, MVType))
- return S.Diag(NewFD->getLocation(), diag::err_multiversion_no_other_attrs)
- << IsCPUSpecificCPUDispatchMVType;
+ if (checkNonMultiVersionCompatAttributes(S, NewFD, nullptr, MVType))
+ return true;
// Only allow transition to MultiVersion if it hasn't been used.
if (OldFD && CausesMV && OldFD->isUsed(false))
@@ -10618,6 +10727,12 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
MergeTypeWithPrevious, Previous))
return Redeclaration;
+ // PPC MMA non-pointer types are not allowed as function return types.
+ if (Context.getTargetInfo().getTriple().isPPC64() &&
+ CheckPPCMMAType(NewFD->getReturnType(), NewFD->getLocation())) {
+ NewFD->setInvalidDecl();
+ }
+
// C++11 [dcl.constexpr]p8:
// A constexpr specifier for a non-static member function that is not
// a constructor declares that member function to be const.
@@ -10680,7 +10795,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
NewTemplateDecl->mergePrevDecl(OldTemplateDecl);
NewFD->setPreviousDeclaration(OldFD);
- adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
if (NewFD->isCXXClassMember()) {
NewFD->setAccess(OldTemplateDecl->getAccess());
NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
@@ -10707,7 +10821,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
auto *OldFD = cast<FunctionDecl>(OldDecl);
// This needs to happen first so that 'inline' propagates.
NewFD->setPreviousDeclaration(OldFD);
- adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
if (NewFD->isCXXClassMember())
NewFD->setAccess(OldFD->getAccess());
}
@@ -10737,6 +10850,9 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
}
+ if (LangOpts.OpenMP)
+ ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(NewFD);
+
// Semantic checking for this function declaration (in isolation).
if (getLangOpts().CPlusPlus) {
@@ -10815,7 +10931,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// declaration against the expected type for the builtin.
if (unsigned BuiltinID = NewFD->getBuiltinID()) {
ASTContext::GetBuiltinTypeError Error;
- LookupPredefedObjCSuperType(*this, S, NewFD->getIdentifier());
+ LookupNecessaryTypesForBuiltin(S, BuiltinID);
QualType T = Context.GetBuiltinType(BuiltinID, Error);
// If the type of the builtin differs only in its exception
// specification, that's OK.
@@ -10911,7 +11027,7 @@ void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main)
<< FD->isConsteval()
<< FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
- FD->setConstexprKind(CSK_unspecified);
+ FD->setConstexprKind(ConstexprSpecKind::Unspecified);
}
if (getLangOpts().OpenCL) {
@@ -11067,8 +11183,14 @@ bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
// except that the aforementioned are allowed in unevaluated
// expressions. Everything else falls under the
// "may accept other forms of constant expressions" exception.
- // (We never end up here for C++, so the constant expression
- // rules there don't matter.)
+ //
+ // Regular C++ code will not end up here (exceptions: language extensions,
+  // OpenCL C++, etc.), so the constant expression rules there don't matter.
+ if (Init->isValueDependent()) {
+ assert(Init->containsErrors() &&
+ "Dependent code should only occur in error-recovery path.");
+ return true;
+ }
const Expr *Culprit;
if (Init->isConstantInitializer(Context, false, &Culprit))
return false;
@@ -12033,7 +12155,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Try to correct any TypoExprs in the initialization arguments.
for (size_t Idx = 0; Idx < Args.size(); ++Idx) {
ExprResult Res = CorrectDelayedTyposInExpr(
- Args[Idx], VDecl, /*RecoverUncorrectedTypos=*/false,
+ Args[Idx], VDecl, /*RecoverUncorrectedTypos=*/true,
[this, Entity, Kind](Expr *E) {
InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E));
return Init.Failed() ? ExprError() : E;
@@ -12402,7 +12524,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
!Context.getTargetInfo().getCXXABI().isMicrosoft()) {
Diag(Var->getLocation(),
diag::err_constexpr_static_mem_var_requires_init)
- << Var->getDeclName();
+ << Var;
Var->setInvalidDecl();
return;
}
@@ -12424,6 +12546,17 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
}
if (!Var->isInvalidDecl() && RealDecl->hasAttr<LoaderUninitializedAttr>()) {
+ if (Var->getStorageClass() == SC_Extern) {
+ Diag(Var->getLocation(), diag::err_loader_uninitialized_extern_decl)
+ << Var;
+ Var->setInvalidDecl();
+ return;
+ }
+ if (RequireCompleteType(Var->getLocation(), Var->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Var->setInvalidDecl();
+ return;
+ }
if (CXXRecordDecl *RD = Var->getType()->getAsCXXRecordDecl()) {
if (!RD->hasTrivialDefaultConstructor()) {
Diag(Var->getLocation(), diag::err_loader_uninitialized_trivial_ctor);
@@ -12431,12 +12564,6 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
}
- if (Var->getStorageClass() == SC_Extern) {
- Diag(Var->getLocation(), diag::err_loader_uninitialized_extern_decl)
- << Var;
- Var->setInvalidDecl();
- return;
- }
}
VarDecl::DefinitionKind DefKind = Var->isThisDeclarationADefinition();
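
A sketch of the reordered checks (illustrative, not part of the patch;
assumes the usual [[clang::loader_uninitialized]] spelling):

    [[clang::loader_uninitialized]] extern int ext_var;
    // rejected first (err_loader_uninitialized_extern_decl); an incomplete
    // type would now hit err_typecheck_decl_incomplete_type before the
    // trivial-default-constructor check runs
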
@@ -12535,8 +12662,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
// definitions with reference type.
if (Type->isReferenceType()) {
Diag(Var->getLocation(), diag::err_reference_var_requires_init)
- << Var->getDeclName()
- << SourceRange(Var->getLocation(), Var->getLocation());
+ << Var << SourceRange(Var->getLocation(), Var->getLocation());
Var->setInvalidDecl();
return;
}
@@ -12667,9 +12793,21 @@ void Sema::ActOnCXXForRangeDecl(Decl *D) {
Error = 4;
break;
}
+
+  // for-range-declaration cannot be given a storage class specifier (continued).
+ switch (VD->getTSCSpec()) {
+ case TSCS_thread_local:
+ Error = 6;
+ break;
+ case TSCS___thread:
+ case TSCS__Thread_local:
+ case TSCS_unspecified:
+ break;
+ }
+
if (Error != -1) {
Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class)
- << VD->getDeclName() << Error;
+ << VD << Error;
D->setInvalidDecl();
}
}
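
A sketch of the input the added switch rejects (illustrative, not part of
the patch):

    void iterate(int (&arr)[4]) {
      for (thread_local int x : arr)   // err_for_range_storage_class,
        (void)x;                       // via the new Error = 6 case
    }
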
@@ -12691,7 +12829,7 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID,
getPrintingPolicy());
- Declarator D(DS, DeclaratorContext::ForContext);
+ Declarator D(DS, DeclaratorContext::ForInit);
D.SetIdentifier(Ident, IdentLoc);
D.takeAttributes(Attrs, AttrEnd);
@@ -12835,20 +12973,64 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
AttributeCommonInfo::AS_Pragma));
}
+ if (!var->getType()->isStructureType() && var->hasInit() &&
+ isa<InitListExpr>(var->getInit())) {
+ const auto *ILE = cast<InitListExpr>(var->getInit());
+ unsigned NumInits = ILE->getNumInits();
+ if (NumInits > 2)
+ for (unsigned I = 0; I < NumInits; ++I) {
+ const auto *Init = ILE->getInit(I);
+ if (!Init)
+ break;
+ const auto *SL = dyn_cast<StringLiteral>(Init->IgnoreImpCasts());
+ if (!SL)
+ break;
+
+ unsigned NumConcat = SL->getNumConcatenated();
+      // Diagnose a missing comma in a string array initialization. Do not
+      // warn when all the elements in the initializer are concatenated
+      // together, and do not warn for literals that come from macros.
+ if (NumConcat == 2 && !SL->getBeginLoc().isMacroID()) {
+ bool OnlyOneMissingComma = true;
+ for (unsigned J = I + 1; J < NumInits; ++J) {
+ const auto *Init = ILE->getInit(J);
+ if (!Init)
+ break;
+ const auto *SLJ = dyn_cast<StringLiteral>(Init->IgnoreImpCasts());
+ if (!SLJ || SLJ->getNumConcatenated() > 1) {
+ OnlyOneMissingComma = false;
+ break;
+ }
+ }
+
+ if (OnlyOneMissingComma) {
+ SmallVector<FixItHint, 1> Hints;
+ for (unsigned i = 0; i < NumConcat - 1; ++i)
+ Hints.push_back(FixItHint::CreateInsertion(
+ PP.getLocForEndOfToken(SL->getStrTokenLoc(i)), ","));
+
+ Diag(SL->getStrTokenLoc(1),
+ diag::warn_concatenated_literal_array_init)
+ << Hints;
+ Diag(SL->getBeginLoc(),
+ diag::note_concatenated_string_literal_silence);
+ }
+ // In any case, stop now.
+ break;
+ }
+ }
+ }
+
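
A sketch of the initializer the new check flags (illustrative, not part
of the patch): exactly one element is built from two adjacent string
literals while the rest are plain literals, which usually means a comma
was dropped; the fix-it inserts one after the first token.

    const char *Names[] = {
        "alpha",
        "beta"     // warn_concatenated_literal_array_init at "gamma";
        "gamma",   // the fix-it inserts ',' after "beta"
        "delta",
    };
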
// All the following checks are C++ only.
if (!getLangOpts().CPlusPlus) {
- // If this variable must be emitted, add it as an initializer for the
- // current module.
- if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
- Context.addModuleInitializer(ModuleScopes.back().Module, var);
- return;
+ // If this variable must be emitted, add it as an initializer for the
+ // current module.
+ if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, var);
+ return;
}
- if (auto *DD = dyn_cast<DecompositionDecl>(var))
- CheckCompleteDecompositionDeclaration(DD);
-
QualType type = var->getType();
- if (type->isDependentType()) return;
if (var->hasAttr<BlocksAttr>())
getCurFunction()->addByrefBlockVar(var);
@@ -12857,79 +13039,93 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
bool IsGlobal = GlobalStorage && !var->isStaticLocal();
QualType baseType = Context.getBaseElementType(type);
- if (Init && !Init->isValueDependent()) {
- if (var->isConstexpr()) {
- SmallVector<PartialDiagnosticAt, 8> Notes;
- if (!var->evaluateValue(Notes) || !var->isInitICE()) {
- SourceLocation DiagLoc = var->getLocation();
- // If the note doesn't add any useful information other than a source
- // location, fold it into the primary diagnostic.
- if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
- diag::note_invalid_subexpr_in_const_expr) {
- DiagLoc = Notes[0].first;
- Notes.clear();
- }
- Diag(DiagLoc, diag::err_constexpr_var_requires_const_init)
- << var << Init->getSourceRange();
- for (unsigned I = 0, N = Notes.size(); I != N; ++I)
- Diag(Notes[I].first, Notes[I].second);
+ // Check whether the initializer is sufficiently constant.
+ if (!type->isDependentType() && Init && !Init->isValueDependent() &&
+ (GlobalStorage || var->isConstexpr() ||
+ var->mightBeUsableInConstantExpressions(Context))) {
+ // If this variable might have a constant initializer or might be usable in
+ // constant expressions, check whether or not it actually is now. We can't
+ // do this lazily, because the result might depend on things that change
+ // later, such as which constexpr functions happen to be defined.
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ bool HasConstInit;
+ if (!getLangOpts().CPlusPlus11) {
+ // Prior to C++11, in contexts where a constant initializer is required,
+ // the set of valid constant initializers is described by syntactic rules
+ // in [expr.const]p2-6.
+ // FIXME: Stricter checking for these rules would be useful for constinit /
+ // -Wglobal-constructors.
+ HasConstInit = checkConstInit();
+
+ // Compute and cache the constant value, and remember that we have a
+ // constant initializer.
+ if (HasConstInit) {
+ (void)var->checkForConstantInitialization(Notes);
+ Notes.clear();
+ } else if (CacheCulprit) {
+ Notes.emplace_back(CacheCulprit->getExprLoc(),
+ PDiag(diag::note_invalid_subexpr_in_const_expr));
+ Notes.back().second << CacheCulprit->getSourceRange();
}
- } else if (var->mightBeUsableInConstantExpressions(Context)) {
- // Check whether the initializer of a const variable of integral or
- // enumeration type is an ICE now, since we can't tell whether it was
- // initialized by a constant expression if we check later.
- var->checkInitIsICE();
- }
-
- // Don't emit further diagnostics about constexpr globals since they
- // were just diagnosed.
- if (!var->isConstexpr() && GlobalStorage && var->hasAttr<ConstInitAttr>()) {
- // FIXME: Need strict checking in C++03 here.
- bool DiagErr = getLangOpts().CPlusPlus11
- ? !var->checkInitIsICE() : !checkConstInit();
- if (DiagErr) {
- auto *Attr = var->getAttr<ConstInitAttr>();
- Diag(var->getLocation(), diag::err_require_constant_init_failed)
- << Init->getSourceRange();
- Diag(Attr->getLocation(),
- diag::note_declared_required_constant_init_here)
- << Attr->getRange() << Attr->isConstinit();
- if (getLangOpts().CPlusPlus11) {
- APValue Value;
- SmallVector<PartialDiagnosticAt, 8> Notes;
- Init->EvaluateAsInitializer(Value, getASTContext(), var, Notes);
- for (auto &it : Notes)
- Diag(it.first, it.second);
- } else {
- Diag(CacheCulprit->getExprLoc(),
- diag::note_invalid_subexpr_in_const_expr)
- << CacheCulprit->getSourceRange();
- }
+ } else {
+ // Evaluate the initializer to see if it's a constant initializer.
+ HasConstInit = var->checkForConstantInitialization(Notes);
+ }
+
+ if (HasConstInit) {
+ // FIXME: Consider replacing the initializer with a ConstantExpr.
+ } else if (var->isConstexpr()) {
+ SourceLocation DiagLoc = var->getLocation();
+ // If the note doesn't add any useful information other than a source
+ // location, fold it into the primary diagnostic.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
}
- }
- else if (!var->isConstexpr() && IsGlobal &&
- !getDiagnostics().isIgnored(diag::warn_global_constructor,
- var->getLocation())) {
+ Diag(DiagLoc, diag::err_constexpr_var_requires_const_init)
+ << var << Init->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
+ } else if (GlobalStorage && var->hasAttr<ConstInitAttr>()) {
+ auto *Attr = var->getAttr<ConstInitAttr>();
+ Diag(var->getLocation(), diag::err_require_constant_init_failed)
+ << Init->getSourceRange();
+ Diag(Attr->getLocation(), diag::note_declared_required_constant_init_here)
+ << Attr->getRange() << Attr->isConstinit();
+ for (auto &it : Notes)
+ Diag(it.first, it.second);
+ } else if (IsGlobal &&
+ !getDiagnostics().isIgnored(diag::warn_global_constructor,
+ var->getLocation())) {
// Warn about globals which don't have a constant initializer. Don't
// warn about globals with a non-trivial destructor because we already
// warned about them.
CXXRecordDecl *RD = baseType->getAsCXXRecordDecl();
if (!(RD && !RD->hasTrivialDestructor())) {
+      // checkConstInit() here permits trivial default initialization even in
+      // C++11 and later, where such an initializer is not a constant initializer
+ // but nonetheless doesn't require a global constructor.
if (!checkConstInit())
Diag(var->getLocation(), diag::warn_global_constructor)
- << Init->getSourceRange();
+ << Init->getSourceRange();
}
}
}
// Require the destructor.
- if (const RecordType *recordType = baseType->getAs<RecordType>())
- FinalizeVarWithDestructor(var, recordType);
+ if (!type->isDependentType())
+ if (const RecordType *recordType = baseType->getAs<RecordType>())
+ FinalizeVarWithDestructor(var, recordType);
// If this variable must be emitted, add it as an initializer for the current
// module.
if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, var);
+
+ // Build the bindings if this is a structured binding declaration.
+ if (auto *DD = dyn_cast<DecompositionDecl>(var))
+ CheckCompleteDecompositionDeclaration(DD);
}
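
A sketch of the declarations the consolidated check now evaluates eagerly
(illustrative, not part of the patch; assumes the usual attribute
spellings):

    constexpr int three() { return 3; }
    constexpr int k = three();   // constant initializer computed and cached
    extern const int unknown;
    [[clang::require_constant_initialization]]
    int g = unknown;   // err_require_constant_init_failed, with the notes
                       // collected from the failed evaluation
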
/// Determines if a variable's alignment is dependent.
@@ -13041,32 +13237,9 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
}
}
- if (VD->isStaticLocal()) {
+ if (VD->isStaticLocal())
CheckStaticLocalForDllExport(VD);
- if (dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
- // CUDA 8.0 E.3.9.4: Within the body of a __device__ or __global__
- // function, only __shared__ variables or variables without any device
- // memory qualifiers may be declared with static storage class.
- // Note: It is unclear how a function-scope non-const static variable
- // without device memory qualifier is implemented, therefore only static
- // const variable without device memory qualifier is allowed.
- [&]() {
- if (!getLangOpts().CUDA)
- return;
- if (VD->hasAttr<CUDASharedAttr>())
- return;
- if (VD->getType().isConstQualified() &&
- !(VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
- return;
- if (CUDADiagIfDeviceCode(VD->getLocation(),
- diag::err_device_static_local_var)
- << CurrentCUDATarget())
- VD->setInvalidDecl();
- }();
- }
- }
-
// Perform check for initializers of device-side global variables.
// CUDA allows empty constructors as initializers (see E.2.3.1, CUDA
// 7.5). We must also apply the same checks to all __shared__
@@ -13144,20 +13317,20 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (!MagicValueExpr) {
continue;
}
- llvm::APSInt MagicValueInt;
- if (!MagicValueExpr->isIntegerConstantExpr(MagicValueInt, Context)) {
+ Optional<llvm::APSInt> MagicValueInt;
+ if (!(MagicValueInt = MagicValueExpr->getIntegerConstantExpr(Context))) {
Diag(I->getRange().getBegin(),
diag::err_type_tag_for_datatype_not_ice)
<< LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
continue;
}
- if (MagicValueInt.getActiveBits() > 64) {
+ if (MagicValueInt->getActiveBits() > 64) {
Diag(I->getRange().getBegin(),
diag::err_type_tag_for_datatype_too_large)
<< LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
continue;
}
- uint64_t MagicValue = MagicValueInt.getZExtValue();
+ uint64_t MagicValue = MagicValueInt->getZExtValue();
RegisterTypeTagForDatatype(I->getArgumentKind(),
MagicValue,
I->getMatchingCType(),
@@ -13392,7 +13565,7 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
<< getLangOpts().CPlusPlus17;
if (DS.hasConstexprSpecifier())
Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr)
- << 0 << D.getDeclSpec().getConstexprSpecifier();
+ << 0 << static_cast<int>(D.getDeclSpec().getConstexprSpecifier());
DiagnoseFunctionSpecifiers(DS);
@@ -13450,9 +13623,8 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
if (D.getDeclSpec().isModulePrivateSpecified())
Diag(New->getLocation(), diag::err_module_private_local)
- << 1 << New->getDeclName()
- << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
- << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ << 1 << New << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
if (New->hasAttr<BlocksAttr>()) {
Diag(New->getLocation(), diag::err_block_on_nonlocal);
@@ -13504,8 +13676,7 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(
if (!ReturnTy->isDependentType() && ReturnTy.isPODType(Context)) {
unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
- Diag(D->getLocation(), diag::warn_return_value_size)
- << D->getDeclName() << Size;
+ Diag(D->getLocation(), diag::warn_return_value_size) << D << Size;
}
// Warn if any parameter is pass-by-value and larger than the specified
@@ -13517,7 +13688,7 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(
unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
Diag(Parameter->getLocation(), diag::warn_parameter_size)
- << Parameter->getDeclName() << Size;
+ << Parameter << Size;
}
}
@@ -13601,6 +13772,12 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
New->setInvalidDecl();
}
+ // PPC MMA non-pointer types are not allowed as function argument types.
+ if (Context.getTargetInfo().getTriple().isPPC64() &&
+ CheckPPCMMAType(New->getOriginalType(), New->getLocation())) {
+ New->setInvalidDecl();
+ }
+
return New;
}
@@ -13632,7 +13809,7 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
// Use the identifier location for the type source range.
DS.SetRangeStart(FTI.Params[i].IdentLoc);
DS.SetRangeEnd(FTI.Params[i].IdentLoc);
- Declarator ParamD(DS, DeclaratorContext::KNRTypeListContext);
+ Declarator ParamD(DS, DeclaratorContext::KNRTypeList);
ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
}
@@ -13655,19 +13832,17 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
// variant` annotation which specifies the mangled definition as a
// specialization function under the OpenMP context defined as part of the
// `omp begin declare variant`.
- FunctionDecl *BaseFD = nullptr;
- if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope() &&
- TemplateParameterLists.empty())
- BaseFD = ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
- ParentScope, D);
+ SmallVector<FunctionDecl *, 4> Bases;
+ if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope())
+ ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ ParentScope, D, TemplateParameterLists, Bases);
- D.setFunctionDefinitionKind(FDK_Definition);
+ D.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
- if (BaseFD)
- ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
- cast<FunctionDecl>(Dcl), BaseFD);
+ if (!Bases.empty())
+ ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases);
return Dcl;
}
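A hedged sketch of the construct this hunk generalizes: every function defined between the begin/end markers is collected into the Bases vector, so one definition can now correspond to several base declarations (for example when template parameter lists are present):

    int compute(int x); // base declaration

    #pragma omp begin declare variant match(device = {kind(cpu)})
    int compute(int x) { return 2 * x; } // registered as a variant of compute
    #pragma omp end declare variant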
@@ -13736,69 +13911,23 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition,
SkipBodyInfo *SkipBody) {
const FunctionDecl *Definition = EffectiveDefinition;
- if (!Definition && !FD->isDefined(Definition) && !FD->isCXXClassMember()) {
- // If this is a friend function defined in a class template, it does not
- // have a body until it is used, nevertheless it is a definition, see
- // [temp.inst]p2:
- //
- // ... for the purpose of determining whether an instantiated redeclaration
- // is valid according to [basic.def.odr] and [class.mem], a declaration that
- // corresponds to a definition in the template is considered to be a
- // definition.
- //
- // The following code must produce redefinition error:
- //
- // template<typename T> struct C20 { friend void func_20() {} };
- // C20<int> c20i;
- // void func_20() {}
- //
- for (auto I : FD->redecls()) {
- if (I != FD && !I->isInvalidDecl() &&
- I->getFriendObjectKind() != Decl::FOK_None) {
- if (FunctionDecl *Original = I->getInstantiatedFromMemberFunction()) {
- if (FunctionDecl *OrigFD = FD->getInstantiatedFromMemberFunction()) {
- // A merged copy of the same function, instantiated as a member of
- // the same class, is OK.
- if (declaresSameEntity(OrigFD, Original) &&
- declaresSameEntity(cast<Decl>(I->getLexicalDeclContext()),
- cast<Decl>(FD->getLexicalDeclContext())))
- continue;
- }
+ if (!Definition &&
+ !FD->isDefined(Definition, /*CheckForPendingFriendDefinition*/ true))
+ return;
- if (Original->isThisDeclarationADefinition()) {
- Definition = I;
- break;
- }
- }
+ if (Definition->getFriendObjectKind() != Decl::FOK_None) {
+ if (FunctionDecl *OrigDef = Definition->getInstantiatedFromMemberFunction()) {
+ if (FunctionDecl *OrigFD = FD->getInstantiatedFromMemberFunction()) {
+ // A merged copy of the same function, instantiated as a member of
+ // the same class, is OK.
+ if (declaresSameEntity(OrigFD, OrigDef) &&
+ declaresSameEntity(cast<Decl>(Definition->getLexicalDeclContext()),
+ cast<Decl>(FD->getLexicalDeclContext())))
+ return;
}
}
}
- if (!Definition)
- // Similar to friend functions a friend function template may be a
- // definition and do not have a body if it is instantiated in a class
- // template.
- if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) {
- for (auto I : FTD->redecls()) {
- auto D = cast<FunctionTemplateDecl>(I);
- if (D != FTD) {
- assert(!D->isThisDeclarationADefinition() &&
- "More than one definition in redeclaration chain");
- if (D->getFriendObjectKind() != Decl::FOK_None)
- if (FunctionTemplateDecl *FT =
- D->getInstantiatedFromMemberTemplate()) {
- if (FT->isThisDeclarationADefinition()) {
- Definition = D->getTemplatedDecl();
- break;
- }
- }
- }
- }
- }
-
- if (!Definition)
- return;
-
if (canRedefineFunction(Definition, getLangOpts()))
return;
@@ -13825,9 +13954,9 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
Definition->getStorageClass() == SC_Extern)
Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
- << FD->getDeclName() << getLangOpts().CPlusPlus;
+ << FD << getLangOpts().CPlusPlus;
else
- Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
+ Diag(FD->getLocation(), diag::err_redefinition) << FD;
Diag(Definition->getLocation(), diag::note_previous_definition);
FD->setInvalidDecl();
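The test case from the deleted comment still shows what the streamlined check, now driven by isDefined with CheckForPendingFriendDefinition, must reject:

    template <typename T> struct C20 { friend void func_20() {} };
    C20<int> c20i;     // instantiating C20<int> makes the friend a definition
    void func_20() {}  // error: redefinition of 'func_20'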
@@ -13916,9 +14045,20 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
FD->setInvalidDecl();
}
- // See if this is a redefinition. If 'will have body' is already set, then
- // these checks were already performed when it was set.
- if (!FD->willHaveBody() && !FD->isLateTemplateParsed()) {
+ if (auto *Ctor = dyn_cast<CXXConstructorDecl>(FD)) {
+ if (Ctor->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
+ Ctor->isDefaultConstructor() &&
+ Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // If this is an MS ABI dllexport default constructor, instantiate any
+ // default arguments.
+ InstantiateDefaultCtorDefaultArgs(Ctor);
+ }
+ }
+
+ // See if this is a redefinition. If 'will have body' (or similar) is already
+ // set, then these checks were already performed when it was set.
+ if (!FD->willHaveBody() && !FD->isLateTemplateParsed() &&
+ !FD->isThisDeclarationInstantiatedFromAFriendDefinition()) {
CheckForFunctionRedefinition(FD, nullptr, SkipBody);
// If we're skipping the body, we're done. Don't enter the scope.
@@ -14158,12 +14298,16 @@ static void diagnoseImplicitlyRetainedSelf(Sema &S) {
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
bool IsInstantiation) {
+ FunctionScopeInfo *FSI = getCurFunction();
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
+ if (FSI->UsesFPIntrin && !FD->hasAttr<StrictFPAttr>())
+ FD->addAttr(StrictFPAttr::CreateImplicit(Context));
+
sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
- if (getLangOpts().Coroutines && getCurFunction()->isCoroutine())
+ if (getLangOpts().Coroutines && FSI->isCoroutine())
CheckCompletedCoroutineBody(FD, Body);
// Do not call PopExpressionEvaluationContext() if it is a lambda because one
@@ -14240,7 +14384,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// to deduce an implicit return type.
if (FD->getReturnType()->isRecordType() &&
(!getLangOpts().CPlusPlus || !FD->isDependentContext()))
- computeNRVO(Body, getCurFunction());
+ computeNRVO(Body, FSI);
}
// GNU warning -Wmissing-prototypes:
@@ -14364,14 +14508,14 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
MD->getReturnType(), MD);
if (Body)
- computeNRVO(Body, getCurFunction());
+ computeNRVO(Body, FSI);
}
- if (getCurFunction()->ObjCShouldCallSuper) {
+ if (FSI->ObjCShouldCallSuper) {
Diag(MD->getEndLoc(), diag::warn_objc_missing_super_call)
<< MD->getSelector().getAsString();
- getCurFunction()->ObjCShouldCallSuper = false;
+ FSI->ObjCShouldCallSuper = false;
}
- if (getCurFunction()->ObjCWarnForNoDesignatedInitChain) {
+ if (FSI->ObjCWarnForNoDesignatedInitChain) {
const ObjCMethodDecl *InitMethod = nullptr;
bool isDesignated =
MD->isDesignatedInitializerForTheInterface(&InitMethod);
@@ -14396,14 +14540,14 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
Diag(InitMethod->getLocation(),
diag::note_objc_designated_init_marked_here);
}
- getCurFunction()->ObjCWarnForNoDesignatedInitChain = false;
+ FSI->ObjCWarnForNoDesignatedInitChain = false;
}
- if (getCurFunction()->ObjCWarnForNoInitDelegation) {
+ if (FSI->ObjCWarnForNoInitDelegation) {
      // Don't issue this warning for unavailable inits.
if (!MD->isUnavailable())
Diag(MD->getLocation(),
diag::warn_objc_secondary_init_missing_init_call);
- getCurFunction()->ObjCWarnForNoInitDelegation = false;
+ FSI->ObjCWarnForNoInitDelegation = false;
}
diagnoseImplicitlyRetainedSelf(*this);
@@ -14414,10 +14558,10 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
return nullptr;
}
- if (Body && getCurFunction()->HasPotentialAvailabilityViolations)
+ if (Body && FSI->HasPotentialAvailabilityViolations)
DiagnoseUnguardedAvailabilityViolations(dcl);
- assert(!getCurFunction()->ObjCShouldCallSuper &&
+ assert(!FSI->ObjCShouldCallSuper &&
"This should only be set for ObjC methods, which should have been "
"handled in the block above.");
@@ -14430,7 +14574,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
// Verify that gotos and switch cases don't jump into scopes illegally.
- if (getCurFunction()->NeedsScopeChecking() &&
+ if (FSI->NeedsScopeChecking() &&
!PP.isCodeCompletionEnabled())
DiagnoseInvalidJumps(Body);
@@ -14445,11 +14589,11 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (getDiagnostics().hasUncompilableErrorOccurred() ||
+ if (hasUncompilableErrorOccurred() ||
getDiagnostics().getSuppressAllDiagnostics()) {
DiscardCleanupsInEvaluationContext();
}
- if (!getDiagnostics().hasUncompilableErrorOccurred() &&
+ if (!hasUncompilableErrorOccurred() &&
!isa<FunctionTemplateDecl>(dcl)) {
// Since the body is valid, issue any analysis-based warnings that are
// enabled.
@@ -14501,11 +14645,11 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (getDiagnostics().hasUncompilableErrorOccurred()) {
+ if (hasUncompilableErrorOccurred()) {
DiscardCleanupsInEvaluationContext();
}
- if (LangOpts.OpenMP || LangOpts.CUDA || LangOpts.SYCLIsDevice) {
+ if (FD && (LangOpts.OpenMP || LangOpts.CUDA || LangOpts.SYCLIsDevice)) {
auto ES = getEmissionStatus(FD);
if (ES == Sema::FunctionEmissionStatus::Emitted ||
ES == Sema::FunctionEmissionStatus::Unknown)
@@ -14612,7 +14756,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
SourceLocation NoLoc;
- Declarator D(DS, DeclaratorContext::BlockContext);
+ Declarator D(DS, DeclaratorContext::Block);
D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
@@ -14882,9 +15026,10 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (CurContext->isFunctionOrMethod())
Diag(NewTD->getLocation(), diag::err_module_private_local)
- << 2 << NewTD->getDeclName()
- << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
- << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ << 2 << NewTD
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getModulePrivateSpecLoc());
else
NewTD->setModulePrivate();
}
@@ -15272,6 +15417,10 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
isMemberSpecialization = true;
}
}
+
+ if (!TemplateParameterLists.empty() && isMemberSpecialization &&
+ CheckTemplateDeclScope(S, TemplateParameterLists.back()))
+ return nullptr;
}
// Figure out the underlying type if this a enum declaration. We need to do
@@ -15592,6 +15741,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// type declared by an elaborated-type-specifier. In C that is not correct
// and we should instead merge compatible types found by lookup.
if (getLangOpts().CPlusPlus) {
+ // FIXME: This can perform qualified lookups into function contexts,
+ // which are meaningless.
Previous.setRedeclarationKind(forRedeclarationInCurContext());
LookupQualifiedName(Previous, SearchDC);
} else {
@@ -16309,7 +16460,7 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
return BitWidth;
llvm::APSInt Value;
- ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value);
+ ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value, AllowFold);
if (ICE.isInvalid())
return ICE;
BitWidth = ICE.get();
@@ -16329,6 +16480,13 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
<< Value.toString(10);
}
+ // The size of the bit-field must not exceed our maximum permitted object
+ // size.
+ if (Value.getActiveBits() > ConstantArrayType::getMaxSizeBits(Context)) {
+ return Diag(FieldLoc, diag::err_bitfield_too_wide)
+ << !FieldName << FieldName << Value.toString(10);
+ }
+
if (!FieldTy->isDependentType()) {
uint64_t TypeStorageSize = Context.getTypeSize(FieldTy);
uint64_t TypeWidth = Context.getIntWidth(FieldTy);
@@ -16346,25 +16504,21 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
CStdConstraintViolation ? TypeWidth : TypeStorageSize;
if (FieldName)
return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_width)
- << FieldName << (unsigned)Value.getZExtValue()
+ << FieldName << Value.toString(10)
<< !CStdConstraintViolation << DiagWidth;
return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_width)
- << (unsigned)Value.getZExtValue() << !CStdConstraintViolation
+ << Value.toString(10) << !CStdConstraintViolation
<< DiagWidth;
}
// Warn on types where the user might conceivably expect to get all
// specified bits as value bits: that's all integral types other than
// 'bool'.
- if (BitfieldIsOverwide && !FieldTy->isBooleanType()) {
- if (FieldName)
- Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width)
- << FieldName << (unsigned)Value.getZExtValue()
- << (unsigned)TypeWidth;
- else
- Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_width)
- << (unsigned)Value.getZExtValue() << (unsigned)TypeWidth;
+ if (BitfieldIsOverwide && !FieldTy->isBooleanType() && FieldName) {
+ Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width)
+ << FieldName << Value.toString(10)
+ << (unsigned)TypeWidth;
}
}
@@ -16558,27 +16712,9 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// C99 6.7.2.1p8: A member of a structure or union may have any type other
// than a variably modified type.
if (!InvalidDecl && T->isVariablyModifiedType()) {
- bool SizeIsNegative;
- llvm::APSInt Oversized;
-
- TypeSourceInfo *FixedTInfo =
- TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
- SizeIsNegative,
- Oversized);
- if (FixedTInfo) {
- Diag(Loc, diag::warn_illegal_constant_array_size);
- TInfo = FixedTInfo;
- T = FixedTInfo->getType();
- } else {
- if (SizeIsNegative)
- Diag(Loc, diag::err_typecheck_negative_array_size);
- else if (Oversized.getBoolValue())
- Diag(Loc, diag::err_array_too_large)
- << Oversized.toString(10);
- else
- Diag(Loc, diag::err_typecheck_field_variable_size);
+ if (!tryToFixVariablyModifiedVarType(
+ *this, TInfo, T, Loc, diag::err_typecheck_field_variable_size))
InvalidDecl = true;
- }
}
  // Fields cannot have abstract class types
@@ -16599,14 +16735,6 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
BitWidth = nullptr;
ZeroWidth = false;
}
-
- // Only data members can have in-class initializers.
- if (BitWidth && !II && InitStyle) {
- Diag(Loc, diag::err_anon_bitfield_init);
- InvalidDecl = true;
- BitWidth = nullptr;
- ZeroWidth = false;
- }
}
// Check that 'mutable' is consistent with the type of the declaration.
@@ -16694,6 +16822,11 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
if (T.isObjCGCWeak())
Diag(Loc, diag::warn_attribute_weak_on_field);
+ // PPC MMA non-pointer types are not allowed as field types.
+ if (Context.getTargetInfo().getTriple().isPPC64() &&
+ CheckPPCMMAType(T, NewFD->getLocation()))
+ NewFD->setInvalidDecl();
+
NewFD->setAccess(AS);
return NewFD;
}
@@ -16807,8 +16940,9 @@ Decl *Sema::ActOnIvar(Scope *S,
// C99 6.7.2.1p8: A member of a structure or union may have any type other
// than a variably modified type.
else if (T->isVariablyModifiedType()) {
- Diag(Loc, diag::err_typecheck_ivar_variable_size);
- D.setInvalidType();
+ if (!tryToFixVariablyModifiedVarType(
+ *this, TInfo, T, Loc, diag::err_typecheck_ivar_variable_size))
+ D.setInvalidType();
}
// Get the visibility (access control) for this ivar.
@@ -17271,7 +17405,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record);
CheckForZeroSize =
CXXRecord->getLexicalDeclContext()->isExternCContext() &&
- !CXXRecord->isDependentType() &&
+ !CXXRecord->isDependentType() && !inTemplateInstantiation() &&
CXXRecord->isCLike();
}
if (CheckForZeroSize) {
@@ -17435,6 +17569,8 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (Enum->isDependentType() || Val->isTypeDependent())
EltTy = Context.DependentTy;
else {
+ // FIXME: We don't allow folding in C++11 mode for an enum with a fixed
+ // underlying type, but do allow it in all other contexts.
if (getLangOpts().CPlusPlus11 && Enum->isFixed()) {
// C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the
// constant-expression in the enumerator-definition shall be a converted
@@ -17448,8 +17584,9 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
else
Val = Converted.get();
} else if (!Val->isValueDependent() &&
- !(Val = VerifyIntegerConstantExpression(Val,
- &EnumVal).get())) {
+ !(Val =
+ VerifyIntegerConstantExpression(Val, &EnumVal, AllowFold)
+ .get())) {
// C99 6.7.2.2p2: Make sure we have an integer constant expression.
} else {
if (Enum->isComplete()) {
@@ -18070,11 +18207,9 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
// Adjust the Expr initializer and type.
if (ECD->getInitExpr() &&
!Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
- ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy,
- CK_IntegralCast,
- ECD->getInitExpr(),
- /*base paths*/ nullptr,
- VK_RValue));
+ ECD->setInitExpr(ImplicitCastExpr::Create(
+ Context, NewTy, CK_IntegralCast, ECD->getInitExpr(),
+ /*base paths*/ nullptr, VK_RValue, FPOptionsOverride()));
if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 1a0594512a60..30d08b3d4ac0 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -21,7 +21,9 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
@@ -30,12 +32,16 @@
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Assumptions.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace sema;
@@ -240,9 +246,9 @@ template <typename AttrInfo>
static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
uint32_t &Val, unsigned Idx = UINT_MAX,
bool StrictlyUnsigned = false) {
- llvm::APSInt I(32);
+ Optional<llvm::APSInt> I = llvm::APSInt(32);
if (Expr->isTypeDependent() || Expr->isValueDependent() ||
- !Expr->isIntegerConstantExpr(I, S.Context)) {
+ !(I = Expr->getIntegerConstantExpr(S.Context))) {
if (Idx != UINT_MAX)
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
<< &AI << Idx << AANT_ArgumentIntegerConstant
@@ -253,19 +259,19 @@ static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
return false;
}
- if (!I.isIntN(32)) {
+ if (!I->isIntN(32)) {
S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
- << I.toString(10, false) << 32 << /* Unsigned */ 1;
+ << I->toString(10, false) << 32 << /* Unsigned */ 1;
return false;
}
- if (StrictlyUnsigned && I.isSigned() && I.isNegative()) {
+ if (StrictlyUnsigned && I->isSigned() && I->isNegative()) {
S.Diag(getAttrLoc(AI), diag::err_attribute_requires_positive_integer)
<< &AI << /*non-negative*/ 1;
return false;
}
- Val = (uint32_t)I.getZExtValue();
+ Val = (uint32_t)I->getZExtValue();
return true;
}
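This hunk is one instance of the migration applied throughout the commit: the out-parameter form Expr::isIntegerConstantExpr(APSInt &, Ctx) becomes Expr::getIntegerConstantExpr(Ctx), which returns Optional<llvm::APSInt>. A minimal caller-side sketch (the helper name is invented):

    #include <cstdint>
    #include "clang/AST/Expr.h"
    #include "llvm/ADT/Optional.h"

    // Hypothetical helper mirroring checkUInt32Argument: true iff E folds to
    // an integer constant that fits in 32 unsigned bits.
    static bool readUInt32(const clang::Expr *E, const clang::ASTContext &Ctx,
                           uint32_t &Out) {
      llvm::Optional<llvm::APSInt> I = E->getIntegerConstantExpr(Ctx);
      if (!I || !I->isIntN(32))
        return false; // no constant value, or it does not fit in 32 bits
      Out = static_cast<uint32_t>(I->getZExtValue());
      return true;
    }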
@@ -332,16 +338,16 @@ static bool checkFunctionOrMethodParameterIndex(
unsigned NumParams =
(HP ? getFunctionOrMethodNumParams(D) : 0) + HasImplicitThisParam;
- llvm::APSInt IdxInt;
+ Optional<llvm::APSInt> IdxInt;
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
- !IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) {
+ !(IdxInt = IdxExpr->getIntegerConstantExpr(S.Context))) {
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
<< &AI << AttrArgNum << AANT_ArgumentIntegerConstant
<< IdxExpr->getSourceRange();
return false;
}
- unsigned IdxSource = IdxInt.getLimitedValue(UINT_MAX);
+ unsigned IdxSource = IdxInt->getLimitedValue(UINT_MAX);
if (IdxSource < 1 || (!IV && IdxSource > NumParams)) {
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_out_of_bounds)
<< &AI << AttrArgNum << IdxExpr->getSourceRange();
@@ -1376,6 +1382,43 @@ static void handlePackedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL;
}
+static void handlePreferredName(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto *RD = cast<CXXRecordDecl>(D);
+ ClassTemplateDecl *CTD = RD->getDescribedClassTemplate();
+ assert(CTD && "attribute does not appertain to this declaration");
+
+ ParsedType PT = AL.getTypeArg();
+ TypeSourceInfo *TSI = nullptr;
+ QualType T = S.GetTypeFromParser(PT, &TSI);
+ if (!TSI)
+ TSI = S.Context.getTrivialTypeSourceInfo(T, AL.getLoc());
+
+ if (!T.hasQualifiers() && T->isTypedefNameType()) {
+ // Find the template name, if this type names a template specialization.
+ const TemplateDecl *Template = nullptr;
+ if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
+ T->getAsCXXRecordDecl())) {
+ Template = CTSD->getSpecializedTemplate();
+ } else if (const auto *TST = T->getAs<TemplateSpecializationType>()) {
+ while (TST && TST->isTypeAlias())
+ TST = TST->getAliasedType()->getAs<TemplateSpecializationType>();
+ if (TST)
+ Template = TST->getTemplateName().getAsTemplateDecl();
+ }
+
+ if (Template && declaresSameEntity(Template, CTD)) {
+ D->addAttr(::new (S.Context) PreferredNameAttr(S.Context, AL, TSI));
+ return;
+ }
+ }
+
+ S.Diag(AL.getLoc(), diag::err_attribute_preferred_name_arg_invalid)
+ << T << CTD;
+ if (const auto *TT = T->getAs<TypedefType>())
+ S.Diag(TT->getDecl()->getLocation(), diag::note_entity_declared_at)
+ << TT->getDecl();
+}
+
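Usage the new handler accepts, sketched after the libc++ pattern; the attribute argument must be a typedef naming a specialization of the template being declared:

    template <class CharT> struct basic_string;
    using string  = basic_string<char>;
    using wstring = basic_string<wchar_t>;

    template <class CharT>
    struct [[clang::preferred_name(string), clang::preferred_name(wstring)]]
        basic_string { /* ... */ };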
static bool checkIBOutletCommon(Sema &S, Decl *D, const ParsedAttr &AL) {
// The IBOutlet/IBOutletCollection attributes only apply to instance
// variables or properties of Objective-C classes. The outlet must also
@@ -1605,8 +1648,8 @@ void Sema::AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
}
if (!E->isValueDependent()) {
- llvm::APSInt I(64);
- if (!E->isIntegerConstantExpr(I, Context)) {
+ Optional<llvm::APSInt> I = llvm::APSInt(64);
+ if (!(I = E->getIntegerConstantExpr(Context))) {
if (OE)
Diag(AttrLoc, diag::err_attribute_argument_n_type)
<< &TmpAttr << 1 << AANT_ArgumentIntegerConstant
@@ -1618,27 +1661,22 @@ void Sema::AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
return;
}
- if (!I.isPowerOf2()) {
+ if (!I->isPowerOf2()) {
Diag(AttrLoc, diag::err_alignment_not_power_of_two)
<< E->getSourceRange();
return;
}
- if (I > Sema::MaximumAlignment)
+ if (*I > Sema::MaximumAlignment)
Diag(CI.getLoc(), diag::warn_assume_aligned_too_great)
<< CI.getRange() << Sema::MaximumAlignment;
}
- if (OE) {
- if (!OE->isValueDependent()) {
- llvm::APSInt I(64);
- if (!OE->isIntegerConstantExpr(I, Context)) {
- Diag(AttrLoc, diag::err_attribute_argument_n_type)
- << &TmpAttr << 2 << AANT_ArgumentIntegerConstant
- << OE->getSourceRange();
- return;
- }
- }
+ if (OE && !OE->isValueDependent() && !OE->isIntegerConstantExpr(Context)) {
+ Diag(AttrLoc, diag::err_attribute_argument_n_type)
+ << &TmpAttr << 2 << AANT_ArgumentIntegerConstant
+ << OE->getSourceRange();
+ return;
}
D->addAttr(::new (Context) AssumeAlignedAttr(Context, CI, E, OE));
@@ -1676,6 +1714,42 @@ void Sema::AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
D->addAttr(::new (Context) AllocAlignAttr(Context, CI, Idx));
}
+/// Check if \p AssumptionStr is a known assumption and warn if not.
+static void checkAssumptionAttr(Sema &S, SourceLocation Loc,
+ StringRef AssumptionStr) {
+ if (llvm::KnownAssumptionStrings.count(AssumptionStr))
+ return;
+
+ unsigned BestEditDistance = 3;
+ StringRef Suggestion;
+ for (const auto &KnownAssumptionIt : llvm::KnownAssumptionStrings) {
+ unsigned EditDistance =
+ AssumptionStr.edit_distance(KnownAssumptionIt.getKey());
+ if (EditDistance < BestEditDistance) {
+ Suggestion = KnownAssumptionIt.getKey();
+ BestEditDistance = EditDistance;
+ }
+ }
+
+ if (!Suggestion.empty())
+ S.Diag(Loc, diag::warn_assume_attribute_string_unknown_suggested)
+ << AssumptionStr << Suggestion;
+ else
+ S.Diag(Loc, diag::warn_assume_attribute_string_unknown) << AssumptionStr;
+}
+
+static void handleAssumumptionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // Handle the case where the attribute has a text message.
+ StringRef Str;
+ SourceLocation AttrStrLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &AttrStrLoc))
+ return;
+
+ checkAssumptionAttr(S, AttrStrLoc, Str);
+
+ D->addAttr(::new (S.Context) AssumptionAttr(S.Context, AL, Str));
+}
+
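An illustrative use of the attribute these two functions implement; "omp_no_openmp" is one of the entries in llvm::KnownAssumptionStrings, and a near-miss spelling gets the edit-distance suggestion:

    __attribute__((assume("omp_no_openmp")))  void leaf(void);  // accepted
    __attribute__((assume("omp_no_openmpp"))) void leaf2(void); // warns and
                                                 // suggests "omp_no_openmp"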
/// Normalize the attribute, __foo__ becomes foo.
/// Returns true if normalization was applied.
static bool normalizeName(StringRef &AttrName) {
@@ -2056,7 +2130,8 @@ bool Sema::CheckAttrNoArgs(const ParsedAttr &Attrs) {
bool Sema::CheckAttrTarget(const ParsedAttr &AL) {
// Check whether the attribute is valid on the current target.
if (!AL.existsInTarget(Context.getTargetInfo())) {
- Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored) << AL;
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL << AL.getRange();
AL.setInvalid();
return true;
}
@@ -2618,6 +2693,11 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
D->addAttr(newAttr);
}
+static void handleObjCNonRuntimeProtocolAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ handleSimpleAttribute<ObjCNonRuntimeProtocolAttr>(S, D, AL);
+}
+
static void handleObjCDirectAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// objc_direct cannot be set on methods declared in the context of a protocol
if (isa<ObjCProtocolDecl>(D->getDeclContext())) {
@@ -2729,36 +2809,36 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel;
if (AL.getNumArgs() > 0) {
Expr *E = AL.getArgAsExpr(0);
- llvm::APSInt Idx(32);
+ Optional<llvm::APSInt> Idx = llvm::APSInt(32);
if (E->isTypeDependent() || E->isValueDependent() ||
- !E->isIntegerConstantExpr(Idx, S.Context)) {
+ !(Idx = E->getIntegerConstantExpr(S.Context))) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
<< AL << 1 << AANT_ArgumentIntegerConstant << E->getSourceRange();
return;
}
- if (Idx.isSigned() && Idx.isNegative()) {
+ if (Idx->isSigned() && Idx->isNegative()) {
S.Diag(AL.getLoc(), diag::err_attribute_sentinel_less_than_zero)
<< E->getSourceRange();
return;
}
- sentinel = Idx.getZExtValue();
+ sentinel = Idx->getZExtValue();
}
unsigned nullPos = (unsigned)SentinelAttr::DefaultNullPos;
if (AL.getNumArgs() > 1) {
Expr *E = AL.getArgAsExpr(1);
- llvm::APSInt Idx(32);
+ Optional<llvm::APSInt> Idx = llvm::APSInt(32);
if (E->isTypeDependent() || E->isValueDependent() ||
- !E->isIntegerConstantExpr(Idx, S.Context)) {
+ !(Idx = E->getIntegerConstantExpr(S.Context))) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
<< AL << 2 << AANT_ArgumentIntegerConstant << E->getSourceRange();
return;
}
- nullPos = Idx.getZExtValue();
+ nullPos = Idx->getZExtValue();
- if ((Idx.isSigned() && Idx.isNegative()) || nullPos > 1) {
+ if ((Idx->isSigned() && Idx->isNegative()) || nullPos > 1) {
      // FIXME: This error message could be improved; it would be nice
      // to say what the bounds actually are.
S.Diag(AL.getLoc(), diag::err_attribute_sentinel_not_zero_or_one)
@@ -3001,8 +3081,14 @@ static void handleSectionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
SectionAttr *NewAttr = S.mergeSectionAttr(D, AL, Str);
- if (NewAttr)
+ if (NewAttr) {
D->addAttr(NewAttr);
+ if (isa<FunctionDecl, FunctionTemplateDecl, ObjCMethodDecl,
+ ObjCPropertyDecl>(D))
+ S.UnifySection(NewAttr->getName(),
+ ASTContext::PSF_Execute | ASTContext::PSF_Read,
+ cast<NamedDecl>(D));
+ }
}
// This is used for `__declspec(code_seg("segname"))` on a decl.
@@ -3063,23 +3149,36 @@ static void handleCodeSegAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check for things we'd like to warn about. Multiversioning issues are
// handled later in the process, once we know how many exist.
bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
- enum FirstParam { Unsupported, Duplicate };
- enum SecondParam { None, Architecture };
- for (auto Str : {"tune=", "fpmath="})
- if (AttrStr.find(Str) != StringRef::npos)
- return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unsupported << None << Str;
+ enum FirstParam { Unsupported, Duplicate, Unknown };
+ enum SecondParam { None, Architecture, Tune };
+ if (AttrStr.find("fpmath=") != StringRef::npos)
+ return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << "fpmath=";
+
+ // Diagnose use of tune if target doesn't support it.
+ if (!Context.getTargetInfo().supportsTargetAttributeTune() &&
+ AttrStr.find("tune=") != StringRef::npos)
+ return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << "tune=";
ParsedTargetAttr ParsedAttrs = TargetAttr::parse(AttrStr);
if (!ParsedAttrs.Architecture.empty() &&
!Context.getTargetInfo().isValidCPUName(ParsedAttrs.Architecture))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unsupported << Architecture << ParsedAttrs.Architecture;
+ << Unknown << Architecture << ParsedAttrs.Architecture;
+
+ if (!ParsedAttrs.Tune.empty() &&
+ !Context.getTargetInfo().isValidCPUName(ParsedAttrs.Tune))
+ return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
+ << Unknown << Tune << ParsedAttrs.Tune;
if (ParsedAttrs.DuplicateArchitecture)
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Duplicate << None << "arch=";
+ if (ParsedAttrs.DuplicateTune)
+ return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
+ << Duplicate << None << "tune=";
for (const auto &Feature : ParsedAttrs.Features) {
auto CurFeature = StringRef(Feature).drop_front(); // remove + or -.
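Illustrative attribute strings against the new checks (the CPU names are examples only, and tune= additionally requires a target that supports it, such as x86):

    __attribute__((target("arch=skylake,tune=znver2"))) void hot(void); // OK
    __attribute__((target("fpmath=sse")))  void a(void); // warns: unsupported
    __attribute__((target("tune=a,tune=b"))) void b(void); // warns: duplicate 'tune='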
@@ -3301,7 +3400,11 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (prioritynum < 101 || prioritynum > 65535) {
+ // Only perform the priority check if the attribute is outside of a system
+ // header. Values <= 100 are reserved for the implementation, and libc++
+ // benefits from being able to specify values in that range.
+ if ((prioritynum < 101 || prioritynum > 65535) &&
+ !S.getSourceManager().isInSystemHeader(AL.getLoc())) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_range)
<< E->getSourceRange() << AL << 101 << 65535;
AL.setInvalid();
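A short sketch of the effect; values of 101 and above behave as before, while reserved values now depend on where the attribute is written:

    struct Logger { Logger(); };
    __attribute__((init_priority(200))) Logger early;  // accepted anywhere
    // init_priority(50) is accepted only inside a system header (libc++ relies
    // on this); elsewhere it is still err_attribute_argument_out_of_range.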
@@ -3590,6 +3693,26 @@ static void handleCallbackAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Context, AL, EncodingIndices.data(), EncodingIndices.size()));
}
+static bool isFunctionLike(const Type &T) {
+ // Check for explicit function types.
+  // Check for explicit function types. 'called_once' is only supported in
+  // Objective-C, which has function pointers and block pointers.
+ return T.isFunctionPointerType() || T.isBlockPointerType();
+}
+
+/// Handle 'called_once' attribute.
+static void handleCalledOnceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // 'called_once' only applies to parameters representing functions.
+ QualType T = cast<ParmVarDecl>(D)->getType();
+
+ if (!isFunctionLike(*T)) {
+ S.Diag(AL.getLoc(), diag::err_called_once_attribute_wrong_type);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CalledOnceAttr(S.Context, AL));
+}
+
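A hedged sketch of an accepted declaration; the parameter type must be a function pointer or a block pointer, and the attribute is gated on Objective-C compilations:

    // In an Objective-C translation unit:
    void run_async(void (*completion)(void) __attribute__((called_once)));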
static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Try to find the underlying union declaration.
RecordDecl *RD = nullptr;
@@ -3645,15 +3768,15 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Context.getTypeAlign(FieldType) > FirstAlign) {
// Warn if we drop the attribute.
bool isSize = S.Context.getTypeSize(FieldType) != FirstSize;
- unsigned FieldBits = isSize? S.Context.getTypeSize(FieldType)
- : S.Context.getTypeAlign(FieldType);
+ unsigned FieldBits = isSize ? S.Context.getTypeSize(FieldType)
+ : S.Context.getTypeAlign(FieldType);
S.Diag(Field->getLocation(),
- diag::warn_transparent_union_attribute_field_size_align)
- << isSize << Field->getDeclName() << FieldBits;
- unsigned FirstBits = isSize? FirstSize : FirstAlign;
+ diag::warn_transparent_union_attribute_field_size_align)
+ << isSize << *Field << FieldBits;
+ unsigned FirstBits = isSize ? FirstSize : FirstAlign;
S.Diag(FirstField->getLocation(),
diag::note_transparent_union_first_field_size_align)
- << isSize << FirstBits;
+ << isSize << FirstBits;
return;
}
}
@@ -3661,20 +3784,68 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
RD->addAttr(::new (S.Context) TransparentUnionAttr(S.Context, AL));
}
+void Sema::AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
+ StringRef Str, MutableArrayRef<Expr *> Args) {
+ auto *Attr = AnnotateAttr::Create(Context, Str, Args.data(), Args.size(), CI);
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ for (unsigned Idx = 0; Idx < Attr->args_size(); Idx++) {
+ Expr *&E = Attr->args_begin()[Idx];
+    assert(E && "errors are handled before");
+ if (E->isValueDependent() || E->isTypeDependent())
+ continue;
+
+ if (E->getType()->isArrayType())
+ E = ImpCastExprToType(E, Context.getPointerType(E->getType()),
+ clang::CK_ArrayToPointerDecay)
+ .get();
+ if (E->getType()->isFunctionType())
+ E = ImplicitCastExpr::Create(Context,
+ Context.getPointerType(E->getType()),
+ clang::CK_FunctionToPointerDecay, E, nullptr,
+ VK_RValue, FPOptionsOverride());
+ if (E->isLValue())
+ E = ImplicitCastExpr::Create(Context, E->getType().getNonReferenceType(),
+ clang::CK_LValueToRValue, E, nullptr,
+ VK_RValue, FPOptionsOverride());
+
+ Expr::EvalResult Eval;
+ Notes.clear();
+ Eval.Diag = &Notes;
+
+    bool Result = E->EvaluateAsConstantExpr(Eval, Context);
+
+    // Result means the expression can be folded to a constant. Notes.empty()
+    // means the expression is a valid constant expression in the current
+    // language mode.
+ if (!Result || !Notes.empty()) {
+ Diag(E->getBeginLoc(), diag::err_attribute_argument_n_type)
+ << CI << (Idx + 1) << AANT_ArgumentConstantExpr;
+ for (auto &Note : Notes)
+ Diag(Note.first, Note.second);
+ return;
+ }
+ assert(Eval.Val.hasValue());
+ E = ConstantExpr::Create(Context, E, Eval.Val);
+ }
+ D->addAttr(Attr);
+}
+
static void handleAnnotateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Make sure that there is a string literal as the annotation's single
+ // Make sure that there is a string literal as the annotation's first
// argument.
StringRef Str;
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
return;
- // Don't duplicate annotations that are already set.
- for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
- if (I->getAnnotation() == Str)
- return;
+ llvm::SmallVector<Expr *, 4> Args;
+ Args.reserve(AL.getNumArgs() - 1);
+ for (unsigned Idx = 1; Idx < AL.getNumArgs(); Idx++) {
+ assert(!AL.isArgIdent(Idx));
+ Args.push_back(AL.getArgAsExpr(Idx));
}
- D->addAttr(::new (S.Context) AnnotateAttr(S.Context, AL, Str));
+ S.AddAnnotationAttr(D, AL, Str, Args);
}
static void handleAlignValueAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
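handleAnnotateAttr now forwards any trailing arguments, which AddAnnotationAttr folds into ConstantExpr nodes; a sketch:

    constexpr int kVersion = 2;
    __attribute__((annotate("instrumented", kVersion))) int counter; // OK
    // A non-constant trailing argument is rejected via
    // err_attribute_argument_n_type with AANT_ArgumentConstantExpr.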
@@ -3702,10 +3873,8 @@ void Sema::AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E) {
if (!E->isValueDependent()) {
llvm::APSInt Alignment;
- ExprResult ICE
- = VerifyIntegerConstantExpression(E, &Alignment,
- diag::err_align_value_attribute_argument_not_int,
- /*AllowFold*/ false);
+ ExprResult ICE = VerifyIntegerConstantExpression(
+ E, &Alignment, diag::err_align_value_attribute_argument_not_int);
if (ICE.isInvalid())
return;
@@ -3811,10 +3980,8 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
// FIXME: Cache the number on the AL object?
llvm::APSInt Alignment;
- ExprResult ICE
- = VerifyIntegerConstantExpression(E, &Alignment,
- diag::err_aligned_attribute_argument_not_int,
- /*AllowFold*/ false);
+ ExprResult ICE = VerifyIntegerConstantExpression(
+ E, &Alignment, diag::err_aligned_attribute_argument_not_int);
if (ICE.isInvalid())
return;
@@ -4265,6 +4432,20 @@ NoSpeculativeLoadHardeningAttr *Sema::mergeNoSpeculativeLoadHardeningAttr(
return ::new (Context) NoSpeculativeLoadHardeningAttr(Context, AL);
}
+SwiftNameAttr *Sema::mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
+ StringRef Name) {
+ if (const auto *PrevSNA = D->getAttr<SwiftNameAttr>()) {
+ if (PrevSNA->getName() != Name && !PrevSNA->isImplicit()) {
+ Diag(PrevSNA->getLocation(), diag::err_attributes_are_not_compatible)
+ << PrevSNA << &SNA;
+ Diag(SNA.getLoc(), diag::note_conflicting_attribute);
+ }
+
+ D->dropAttr<SwiftNameAttr>();
+ }
+ return ::new (Context) SwiftNameAttr(Context, SNA, Name);
+}
+
OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI) {
if (AlwaysInlineAttr *Inline = D->getAttr<AlwaysInlineAttr>()) {
@@ -4312,18 +4493,20 @@ static void handleOptimizeNoneAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleConstantAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL))
+ if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL) ||
+ checkAttrMutualExclusion<HIPManagedAttr>(S, D, AL))
return;
const auto *VD = cast<VarDecl>(D);
- if (!VD->hasGlobalStorage()) {
- S.Diag(AL.getLoc(), diag::err_cuda_nonglobal_constant);
+ if (VD->hasLocalStorage()) {
+ S.Diag(AL.getLoc(), diag::err_cuda_nonstatic_constdev);
return;
}
D->addAttr(::new (S.Context) CUDAConstantAttr(S.Context, AL));
}
static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL))
+ if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL) ||
+ checkAttrMutualExclusion<HIPManagedAttr>(S, D, AL))
return;
const auto *VD = cast<VarDecl>(D);
// extern __shared__ is only allowed on arrays with no length (e.g.
@@ -4377,6 +4560,44 @@ static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(NoDebugAttr::CreateImplicit(S.Context));
}
+static void handleDeviceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<CUDAGlobalAttr>(S, D, AL)) {
+ return;
+ }
+
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasLocalStorage()) {
+ S.Diag(AL.getLoc(), diag::err_cuda_nonstatic_constdev);
+ return;
+ }
+ }
+
+ if (auto *A = D->getAttr<CUDADeviceAttr>()) {
+ if (!A->isImplicit())
+ return;
+ D->dropAttr<CUDADeviceAttr>();
+ }
+ D->addAttr(::new (S.Context) CUDADeviceAttr(S.Context, AL));
+}
+
+static void handleManagedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL) ||
+ checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL)) {
+ return;
+ }
+
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasLocalStorage()) {
+ S.Diag(AL.getLoc(), diag::err_cuda_nonstatic_constdev);
+ return;
+ }
+ }
+ if (!D->hasAttr<HIPManagedAttr>())
+ D->addAttr(::new (S.Context) HIPManagedAttr(S.Context, AL));
+ if (!D->hasAttr<CUDADeviceAttr>())
+ D->addAttr(CUDADeviceAttr::CreateImplicit(S.Context));
+}
+
static void handleGNUInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
const auto *Fn = cast<FunctionDecl>(D);
if (!Fn->isInlineSpecified()) {
@@ -4833,19 +5054,19 @@ static Expr *makeLaunchBoundsArgExpr(Sema &S, Expr *E,
if (E->isValueDependent())
return E;
- llvm::APSInt I(64);
- if (!E->isIntegerConstantExpr(I, S.Context)) {
+ Optional<llvm::APSInt> I = llvm::APSInt(64);
+ if (!(I = E->getIntegerConstantExpr(S.Context))) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_n_type)
<< &AL << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange();
return nullptr;
}
// Make sure we can fit it in 32 bits.
- if (!I.isIntN(32)) {
- S.Diag(E->getExprLoc(), diag::err_ice_too_large) << I.toString(10, false)
- << 32 << /* Unsigned */ 1;
+ if (!I->isIntN(32)) {
+ S.Diag(E->getExprLoc(), diag::err_ice_too_large)
+ << I->toString(10, false) << 32 << /* Unsigned */ 1;
return nullptr;
}
- if (I < 0)
+ if (*I < 0)
S.Diag(E->getExprLoc(), diag::warn_attribute_argument_n_negative)
<< &AL << Idx << E->getSourceRange();
@@ -5327,6 +5548,31 @@ static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
D->addAttr(::new (S.Context) ObjCRequiresSuperAttr(S.Context, Attrs));
}
+static void handleNSErrorDomain(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto *E = AL.getArgAsExpr(0);
+ auto Loc = E ? E->getBeginLoc() : AL.getLoc();
+
+ auto *DRE = dyn_cast<DeclRefExpr>(AL.getArgAsExpr(0));
+ if (!DRE) {
+ S.Diag(Loc, diag::err_nserrordomain_invalid_decl) << 0;
+ return;
+ }
+
+ auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD) {
+ S.Diag(Loc, diag::err_nserrordomain_invalid_decl) << 1 << DRE->getDecl();
+ return;
+ }
+
+ if (!isNSStringType(VD->getType(), S.Context) &&
+ !isCFStringType(VD->getType(), S.Context)) {
+ S.Diag(Loc, diag::err_nserrordomain_wrong_type) << VD;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NSErrorDomainAttr(S.Context, AL, VD));
+}
+
static void handleObjCBridgeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
@@ -5488,6 +5734,515 @@ static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
D->addAttr(::new (S.Context) ObjCPreciseLifetimeAttr(S.Context, AL));
}
+static void handleSwiftAttrAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+  // Make sure that there is a string literal as the attribute's single
+  // argument.
+ StringRef Str;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
+ return;
+
+ D->addAttr(::new (S.Context) SwiftAttrAttr(S.Context, AL, Str));
+}
+
+static void handleSwiftBridge(Sema &S, Decl *D, const ParsedAttr &AL) {
+  // Make sure that there is a string literal as the attribute's single
+  // argument.
+ StringRef BT;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, BT))
+ return;
+
+ // Don't duplicate annotations that are already set.
+ if (D->hasAttr<SwiftBridgeAttr>()) {
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) SwiftBridgeAttr(S.Context, AL, BT));
+}
+
+static bool isErrorParameter(Sema &S, QualType QT) {
+ const auto *PT = QT->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ QualType Pointee = PT->getPointeeType();
+
+ // Check for NSError**.
+ if (const auto *OPT = Pointee->getAs<ObjCObjectPointerType>())
+ if (const auto *ID = OPT->getInterfaceDecl())
+ if (ID->getIdentifier() == S.getNSErrorIdent())
+ return true;
+
+ // Check for CFError**.
+ if (const auto *PT = Pointee->getAs<PointerType>())
+ if (const auto *RT = PT->getPointeeType()->getAs<RecordType>())
+ if (S.isCFError(RT->getDecl()))
+ return true;
+
+ return false;
+}
+
+static void handleSwiftError(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto hasErrorParameter = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
+ for (unsigned I = 0, E = getFunctionOrMethodNumParams(D); I != E; ++I) {
+ if (isErrorParameter(S, getFunctionOrMethodParamType(D, I)))
+ return true;
+ }
+
+ S.Diag(AL.getLoc(), diag::err_attr_swift_error_no_error_parameter)
+ << AL << isa<ObjCMethodDecl>(D);
+ return false;
+ };
+
+ auto hasPointerResult = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
+ // - C, ObjC, and block pointers are definitely okay.
+ // - References are definitely not okay.
+ // - nullptr_t is weird, but acceptable.
+ QualType RT = getFunctionOrMethodResultType(D);
+ if (RT->hasPointerRepresentation() && !RT->isReferenceType())
+ return true;
+
+ S.Diag(AL.getLoc(), diag::err_attr_swift_error_return_type)
+ << AL << AL.getArgAsIdent(0)->Ident->getName() << isa<ObjCMethodDecl>(D)
+ << /*pointer*/ 1;
+ return false;
+ };
+
+ auto hasIntegerResult = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
+ QualType RT = getFunctionOrMethodResultType(D);
+ if (RT->isIntegralType(S.Context))
+ return true;
+
+ S.Diag(AL.getLoc(), diag::err_attr_swift_error_return_type)
+ << AL << AL.getArgAsIdent(0)->Ident->getName() << isa<ObjCMethodDecl>(D)
+ << /*integral*/ 0;
+ return false;
+ };
+
+ if (D->isInvalidDecl())
+ return;
+
+ IdentifierLoc *Loc = AL.getArgAsIdent(0);
+ SwiftErrorAttr::ConventionKind Convention;
+ if (!SwiftErrorAttr::ConvertStrToConventionKind(Loc->Ident->getName(),
+ Convention)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << Loc->Ident;
+ return;
+ }
+
+ switch (Convention) {
+ case SwiftErrorAttr::None:
+ // No additional validation required.
+ break;
+
+ case SwiftErrorAttr::NonNullError:
+ if (!hasErrorParameter(S, D, AL))
+ return;
+ break;
+
+ case SwiftErrorAttr::NullResult:
+ if (!hasErrorParameter(S, D, AL) || !hasPointerResult(S, D, AL))
+ return;
+ break;
+
+ case SwiftErrorAttr::NonZeroResult:
+ case SwiftErrorAttr::ZeroResult:
+ if (!hasErrorParameter(S, D, AL) || !hasIntegerResult(S, D, AL))
+ return;
+ break;
+ }
+
+ D->addAttr(::new (S.Context) SwiftErrorAttr(S.Context, AL, Convention));
+}
+
+// For a function, this will validate a compound Swift name, e.g.
+// <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, and
+// the function will output the number of parameter names, and whether this is a
+// single-arg initializer.
+//
+// For a type, enum constant, property, or variable declaration, this will
+// validate either a simple identifier, or a qualified
+// <code>context.identifier</code> name.
+static bool
+validateSwiftFunctionName(Sema &S, const ParsedAttr &AL, SourceLocation Loc,
+ StringRef Name, unsigned &SwiftParamCount,
+ bool &IsSingleParamInit) {
+ SwiftParamCount = 0;
+ IsSingleParamInit = false;
+
+ // Check whether this will be mapped to a getter or setter of a property.
+ bool IsGetter = false, IsSetter = false;
+ if (Name.startswith("getter:")) {
+ IsGetter = true;
+ Name = Name.substr(7);
+ } else if (Name.startswith("setter:")) {
+ IsSetter = true;
+ Name = Name.substr(7);
+ }
+
+ if (Name.back() != ')') {
+ S.Diag(Loc, diag::warn_attr_swift_name_function) << AL;
+ return false;
+ }
+
+ bool IsMember = false;
+ StringRef ContextName, BaseName, Parameters;
+
+ std::tie(BaseName, Parameters) = Name.split('(');
+
+ // Split at the first '.', if it exists, which separates the context name
+ // from the base name.
+ std::tie(ContextName, BaseName) = BaseName.split('.');
+ if (BaseName.empty()) {
+ BaseName = ContextName;
+ ContextName = StringRef();
+ } else if (ContextName.empty() || !isValidIdentifier(ContextName)) {
+ S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*context*/ 1;
+ return false;
+ } else {
+ IsMember = true;
+ }
+
+ if (!isValidIdentifier(BaseName) || BaseName == "_") {
+ S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*basename*/ 0;
+ return false;
+ }
+
+ bool IsSubscript = BaseName == "subscript";
+ // A subscript accessor must be a getter or setter.
+ if (IsSubscript && !IsGetter && !IsSetter) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
+ << AL << /* getter or setter */ 0;
+ return false;
+ }
+
+ if (Parameters.empty()) {
+ S.Diag(Loc, diag::warn_attr_swift_name_missing_parameters) << AL;
+ return false;
+ }
+
+ assert(Parameters.back() == ')' && "expected ')'");
+ Parameters = Parameters.drop_back(); // ')'
+
+ if (Parameters.empty()) {
+ // Setters and subscripts must have at least one parameter.
+ if (IsSubscript) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
+ << AL << /* have at least one parameter */1;
+ return false;
+ }
+
+ if (IsSetter) {
+ S.Diag(Loc, diag::warn_attr_swift_name_setter_parameters) << AL;
+ return false;
+ }
+
+ return true;
+ }
+
+ if (Parameters.back() != ':') {
+ S.Diag(Loc, diag::warn_attr_swift_name_function) << AL;
+ return false;
+ }
+
+ StringRef CurrentParam;
+ llvm::Optional<unsigned> SelfLocation;
+ unsigned NewValueCount = 0;
+ llvm::Optional<unsigned> NewValueLocation;
+ do {
+ std::tie(CurrentParam, Parameters) = Parameters.split(':');
+
+ if (!isValidIdentifier(CurrentParam)) {
+ S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*parameter*/2;
+ return false;
+ }
+
+ if (IsMember && CurrentParam == "self") {
+ // "self" indicates the "self" argument for a member.
+
+ // More than one "self"?
+ if (SelfLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_multiple_selfs) << AL;
+ return false;
+ }
+
+ // The "self" location is the current parameter.
+ SelfLocation = SwiftParamCount;
+ } else if (CurrentParam == "newValue") {
+ // "newValue" indicates the "newValue" argument for a setter.
+
+ // There should only be one 'newValue', but it's only significant for
+ // subscript accessors, so don't error right away.
+ ++NewValueCount;
+
+ NewValueLocation = SwiftParamCount;
+ }
+
+ ++SwiftParamCount;
+ } while (!Parameters.empty());
+
+ // Only instance subscripts are currently supported.
+ if (IsSubscript && !SelfLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
+ << AL << /*have a 'self:' parameter*/2;
+ return false;
+ }
+
+ IsSingleParamInit =
+ SwiftParamCount == 1 && BaseName == "init" && CurrentParam != "_";
+
+ // Check the number of parameters for a getter/setter.
+ if (IsGetter || IsSetter) {
+ // Setters have one parameter for the new value.
+ unsigned NumExpectedParams = IsGetter ? 0 : 1;
+ unsigned ParamDiag =
+ IsGetter ? diag::warn_attr_swift_name_getter_parameters
+ : diag::warn_attr_swift_name_setter_parameters;
+
+ // Instance methods have one parameter for "self".
+ if (SelfLocation)
+ ++NumExpectedParams;
+
+ // Subscripts may have additional parameters beyond the expected params for
+ // the index.
+ if (IsSubscript) {
+ if (SwiftParamCount < NumExpectedParams) {
+ S.Diag(Loc, ParamDiag) << AL;
+ return false;
+ }
+
+ // A subscript setter must explicitly label its newValue parameter to
+ // distinguish it from index parameters.
+ if (IsSetter) {
+ if (!NewValueLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_setter_no_newValue)
+ << AL;
+ return false;
+ }
+ if (NewValueCount > 1) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_setter_multiple_newValues)
+ << AL;
+ return false;
+ }
+ } else {
+ // Subscript getters should have no 'newValue:' parameter.
+ if (NewValueLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_getter_newValue)
+ << AL;
+ return false;
+ }
+ }
+ } else {
+ // Property accessors must have exactly the number of expected params.
+ if (SwiftParamCount != NumExpectedParams) {
+ S.Diag(Loc, ParamDiag) << AL;
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool Sema::DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
+ const ParsedAttr &AL, bool IsAsync) {
+ if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
+ ArrayRef<ParmVarDecl*> Params;
+ unsigned ParamCount;
+
+ if (const auto *Method = dyn_cast<ObjCMethodDecl>(D)) {
+ ParamCount = Method->getSelector().getNumArgs();
+ Params = Method->parameters().slice(0, ParamCount);
+ } else {
+ const auto *F = cast<FunctionDecl>(D);
+
+ ParamCount = F->getNumParams();
+ Params = F->parameters();
+
+ if (!F->hasWrittenPrototype()) {
+ Diag(Loc, diag::warn_attribute_wrong_decl_type) << AL
+ << ExpectedFunctionWithProtoType;
+ return false;
+ }
+ }
+
+ // The async name drops the last callback parameter.
+ if (IsAsync) {
+ if (ParamCount == 0) {
+ Diag(Loc, diag::warn_attr_swift_name_decl_missing_params)
+ << AL << isa<ObjCMethodDecl>(D);
+ return false;
+ }
+ ParamCount -= 1;
+ }
+
+ unsigned SwiftParamCount;
+ bool IsSingleParamInit;
+ if (!validateSwiftFunctionName(*this, AL, Loc, Name,
+ SwiftParamCount, IsSingleParamInit))
+ return false;
+
+ bool ParamCountValid;
+ if (SwiftParamCount == ParamCount) {
+ ParamCountValid = true;
+ } else if (SwiftParamCount > ParamCount) {
+ ParamCountValid = IsSingleParamInit && ParamCount == 0;
+ } else {
+ // We have fewer Swift parameters than Objective-C parameters, but that
+ // might be because we've transformed some of them. Check for potential
+ // "out" parameters and err on the side of not warning.
+ unsigned MaybeOutParamCount =
+ std::count_if(Params.begin(), Params.end(),
+ [](const ParmVarDecl *Param) -> bool {
+ QualType ParamTy = Param->getType();
+ if (ParamTy->isReferenceType() || ParamTy->isPointerType())
+ return !ParamTy->getPointeeType().isConstQualified();
+ return false;
+ });
+
+ ParamCountValid = SwiftParamCount + MaybeOutParamCount >= ParamCount;
+ }
+
+ if (!ParamCountValid) {
+ Diag(Loc, diag::warn_attr_swift_name_num_params)
+ << (SwiftParamCount > ParamCount) << AL << ParamCount
+ << SwiftParamCount;
+ return false;
+ }
+ } else if ((isa<EnumConstantDecl>(D) || isa<ObjCProtocolDecl>(D) ||
+ isa<ObjCInterfaceDecl>(D) || isa<ObjCPropertyDecl>(D) ||
+ isa<VarDecl>(D) || isa<TypedefNameDecl>(D) || isa<TagDecl>(D) ||
+ isa<IndirectFieldDecl>(D) || isa<FieldDecl>(D)) &&
+ !IsAsync) {
+ StringRef ContextName, BaseName;
+
+ std::tie(ContextName, BaseName) = Name.split('.');
+ if (BaseName.empty()) {
+ BaseName = ContextName;
+ ContextName = StringRef();
+ } else if (!isValidIdentifier(ContextName)) {
+ Diag(Loc, diag::warn_attr_swift_name_invalid_identifier) << AL
+ << /*context*/1;
+ return false;
+ }
+
+ if (!isValidIdentifier(BaseName)) {
+ Diag(Loc, diag::warn_attr_swift_name_invalid_identifier) << AL
+ << /*basename*/0;
+ return false;
+ }
+ } else {
+ Diag(Loc, diag::warn_attr_swift_name_decl_kind) << AL;
+ return false;
+ }
+ return true;
+}
+
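A sketch of a compound name this validation accepts on a free function; 'Color' is an invented context, and the three labels match the three parameters:

    __attribute__((swift_name("Color.init(red:green:blue:)")))
    void ColorCreate(double red, double green, double blue);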
+static void handleSwiftName(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Name;
+ SourceLocation Loc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Name, &Loc))
+ return;
+
+ if (!S.DiagnoseSwiftName(D, Name, Loc, AL, /*IsAsync=*/false))
+ return;
+
+ D->addAttr(::new (S.Context) SwiftNameAttr(S.Context, AL, Name));
+}
+
+static void handleSwiftAsyncName(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Name;
+ SourceLocation Loc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Name, &Loc))
+ return;
+
+ if (!S.DiagnoseSwiftName(D, Name, Loc, AL, /*IsAsync=*/true))
+ return;
+
+ D->addAttr(::new (S.Context) SwiftAsyncNameAttr(S.Context, AL, Name));
+}
+
+static void handleSwiftNewType(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // Make sure that there is an identifier as the annotation's single argument.
+ if (!checkAttributeNumArgs(S, AL, 1))
+ return;
+
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ SwiftNewTypeAttr::NewtypeKind Kind;
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
+ if (!SwiftNewTypeAttr::ConvertStrToNewtypeKind(II->getName(), Kind)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << II;
+ return;
+ }
+
+ if (!isa<TypedefNameDecl>(D)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
+ << AL << "typedefs";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) SwiftNewTypeAttr(S.Context, AL, Kind));
+}
+
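A sketch of what handleSwiftNewType accepts (illustrative only): the single argument must be the identifier 'struct' or 'enum', and the attribute only applies to typedefs.

    // OK: identifier argument on a typedef.
    typedef const char *VersionString __attribute__((swift_newtype(struct)));

    // warn_attribute_wrong_decl_type_str: not a typedef.
    extern int counter __attribute__((swift_newtype(enum)));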
+static void handleSwiftAsyncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ SwiftAsyncAttr::Kind Kind;
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
+ if (!SwiftAsyncAttr::ConvertStrToKind(II->getName(), Kind)) {
+ S.Diag(AL.getLoc(), diag::err_swift_async_no_access) << AL << II;
+ return;
+ }
+
+ ParamIdx Idx;
+ if (Kind == SwiftAsyncAttr::None) {
+ // If this is 'none', then there shouldn't be any additional arguments.
+ if (!checkAttributeNumArgs(S, AL, 1))
+ return;
+ } else {
+ // Non-none swift_async requires a completion handler index argument.
+ if (!checkAttributeNumArgs(S, AL, 2))
+ return;
+
+ Expr *HandlerIdx = AL.getArgAsExpr(1);
+ if (!checkFunctionOrMethodParameterIndex(S, D, AL, 2, HandlerIdx, Idx))
+ return;
+
+ const ParmVarDecl *CompletionBlock =
+ getFunctionOrMethodParam(D, Idx.getASTIndex());
+ QualType CompletionBlockType = CompletionBlock->getType();
+ if (!CompletionBlockType->isBlockPointerType()) {
+ S.Diag(CompletionBlock->getLocation(),
+ diag::err_swift_async_bad_block_type)
+ << CompletionBlock->getType();
+ return;
+ }
+ QualType BlockTy =
+ CompletionBlockType->getAs<BlockPointerType>()->getPointeeType();
+ if (!BlockTy->getAs<FunctionType>()->getReturnType()->isVoidType()) {
+ S.Diag(CompletionBlock->getLocation(),
+ diag::err_swift_async_bad_block_type)
+ << CompletionBlock->getType();
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) SwiftAsyncAttr(S.Context, AL, Kind, Idx));
+}
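Illustrative usage of swift_async (hypothetical prototypes): 'none' allows no further arguments, while any other kind requires a 1-based parameter index designating a block parameter whose block returns void.

    // OK: parameter 2 is a void-returning block.
    void fetch(int id, void (^handler)(int result))
        __attribute__((swift_async(swift_private, 2)));

    // err_swift_async_bad_block_type: the designated block returns int.
    void fetch_bad(int id, int (^handler)(void))
        __attribute__((swift_async(swift_private, 2)));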
+
//===----------------------------------------------------------------------===//
// Microsoft specific attribute handlers.
//===----------------------------------------------------------------------===//
@@ -5686,18 +6441,18 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
- llvm::APSInt NumParams(32);
- if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
+ Optional<llvm::APSInt> NumParams = llvm::APSInt(32);
+ if (!(NumParams = NumParamsExpr->getIntegerConstantExpr(S.Context))) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
<< AL << AANT_ArgumentIntegerConstant
<< NumParamsExpr->getSourceRange();
return;
}
// The argument should be in range 0..63.
- unsigned Num = NumParams.getLimitedValue(255);
+ unsigned Num = NumParams->getLimitedValue(255);
if (Num > 63) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL << (int)NumParams.getSExtValue()
+ << AL << (int)NumParams->getSExtValue()
<< NumParamsExpr->getSourceRange();
return;
}
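The hunk above switches to the Optional-returning constant-evaluation API; the general pattern (a sketch against the clang AST interfaces used in this import) is:

    // getIntegerConstantExpr() returns None unless E is an integer
    // constant expression.
    if (Optional<llvm::APSInt> V = E->getIntegerConstantExpr(Ctx)) {
      unsigned N = V->getLimitedValue(255); // clamp before range checking
      // ... use N ...
    }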
@@ -6246,14 +7001,14 @@ DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D,
static void handleDLLAttr(Sema &S, Decl *D, const ParsedAttr &A) {
if (isa<ClassTemplatePartialSpecializationDecl>(D) &&
- S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ (S.Context.getTargetInfo().shouldDLLImportComdatSymbols())) {
S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored) << A;
return;
}
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isInlined() && A.getKind() == ParsedAttr::AT_DLLImport &&
- !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ !(S.Context.getTargetInfo().shouldDLLImportComdatSymbols())) {
// MinGW doesn't allow dllimport on inline functions.
S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored_on_inline)
<< A;
@@ -6262,7 +7017,7 @@ static void handleDLLAttr(Sema &S, Decl *D, const ParsedAttr &A) {
}
if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
- if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ if ((S.Context.getTargetInfo().shouldDLLImportComdatSymbols()) &&
MD->getParent()->isLambda()) {
S.Diag(A.getRange().getBegin(), diag::err_attribute_dll_lambda) << A;
return;
@@ -6788,6 +7543,75 @@ static void handleCFGuardAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) CFGuardAttr(S.Context, AL, Arg));
}
+
+template <typename AttrTy>
+static const AttrTy *findEnforceTCBAttrByName(Decl *D, StringRef Name) {
+ auto Attrs = D->specific_attrs<AttrTy>();
+ auto I = llvm::find_if(Attrs,
+ [Name](const AttrTy *A) {
+ return A->getTCBName() == Name;
+ });
+ return I == Attrs.end() ? nullptr : *I;
+}
+
+template <typename AttrTy, typename ConflictingAttrTy>
+static void handleEnforceTCBAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Argument;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Argument))
+ return;
+
+ // A function cannot have both regular and leaf membership in the same TCB.
+ if (const ConflictingAttrTy *ConflictingAttr =
+ findEnforceTCBAttrByName<ConflictingAttrTy>(D, Argument)) {
+ // We could attach a note to the other attribute but in this case
+ // there's no need given how the two are very close to each other.
+ S.Diag(AL.getLoc(), diag::err_tcb_conflicting_attributes)
+ << AL.getAttrName()->getName() << ConflictingAttr->getAttrName()->getName()
+ << Argument;
+
+ // Error recovery: drop the non-leaf attribute so as to suppress
+ // all future warnings caused by erroneous attributes. The leaf attribute
+ // needs to be kept because it can only suppress warnings, not cause them.
+ D->dropAttr<EnforceTCBAttr>();
+ return;
+ }
+
+ D->addAttr(AttrTy::Create(S.Context, Argument, AL));
+}
+
+template <typename AttrTy, typename ConflictingAttrTy>
+static AttrTy *mergeEnforceTCBAttrImpl(Sema &S, Decl *D, const AttrTy &AL) {
+ // Check if the new redeclaration has different leaf-ness in the same TCB.
+ StringRef TCBName = AL.getTCBName();
+ if (const ConflictingAttrTy *ConflictingAttr =
+ findEnforceTCBAttrByName<ConflictingAttrTy>(D, TCBName)) {
+ S.Diag(ConflictingAttr->getLoc(), diag::err_tcb_conflicting_attributes)
+ << ConflictingAttr->getAttrName()->getName()
+ << AL.getAttrName()->getName() << TCBName;
+
+ // Add a note so that the user could easily find the conflicting attribute.
+ S.Diag(AL.getLoc(), diag::note_conflicting_attribute);
+
+ // More error recovery.
+ D->dropAttr<EnforceTCBAttr>();
+ return nullptr;
+ }
+
+ ASTContext &Context = S.getASTContext();
+ return ::new(Context) AttrTy(Context, AL, AL.getTCBName());
+}
+
+EnforceTCBAttr *Sema::mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL) {
+ return mergeEnforceTCBAttrImpl<EnforceTCBAttr, EnforceTCBLeafAttr>(
+ *this, D, AL);
+}
+
+EnforceTCBLeafAttr *Sema::mergeEnforceTCBLeafAttr(
+ Decl *D, const EnforceTCBLeafAttr &AL) {
+ return mergeEnforceTCBAttrImpl<EnforceTCBLeafAttr, EnforceTCBAttr>(
+ *this, D, AL);
+}
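A minimal sketch of the conflict these helpers diagnose (hypothetical functions): one declaration may be a regular member of one TCB and a leaf member of another, but not both for the same TCB name.

    void f(void) __attribute__((enforce_tcb("net"),
                                enforce_tcb_leaf("crypto"))); // OK

    // err_tcb_conflicting_attributes: "net" names both regular and leaf
    // membership on the same declaration.
    void g(void) __attribute__((enforce_tcb("net"),
                                enforce_tcb_leaf("net")));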
+
//===----------------------------------------------------------------------===//
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
@@ -6815,7 +7639,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
AL.isDeclspecAttribute()
? (unsigned)diag::warn_unhandled_ms_attribute_ignored
: (unsigned)diag::warn_unknown_attribute_ignored)
- << AL;
+ << AL << AL.getRange();
return;
}
@@ -6939,19 +7763,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handlePassObjectSizeAttr(S, D, AL);
break;
case ParsedAttr::AT_Constructor:
- if (S.Context.getTargetInfo().getTriple().isOSAIX())
- llvm::report_fatal_error(
- "'constructor' attribute is not yet supported on AIX");
- else
handleConstructorAttr(S, D, AL);
break;
case ParsedAttr::AT_Deprecated:
handleDeprecatedAttr(S, D, AL);
break;
case ParsedAttr::AT_Destructor:
- if (S.Context.getTargetInfo().getTriple().isOSAIX())
- llvm::report_fatal_error("'destructor' attribute is not yet supported on AIX");
- else
handleDestructorAttr(S, D, AL);
break;
case ParsedAttr::AT_EnableIf:
@@ -6990,16 +7807,21 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Callback:
handleCallbackAttr(S, D, AL);
break;
+ case ParsedAttr::AT_CalledOnce:
+ handleCalledOnceAttr(S, D, AL);
+ break;
case ParsedAttr::AT_CUDAGlobal:
handleGlobalAttr(S, D, AL);
break;
case ParsedAttr::AT_CUDADevice:
- handleSimpleAttributeWithExclusions<CUDADeviceAttr, CUDAGlobalAttr>(S, D,
- AL);
+ handleDeviceAttr(S, D, AL);
break;
case ParsedAttr::AT_CUDAHost:
handleSimpleAttributeWithExclusions<CUDAHostAttr, CUDAGlobalAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_HIPManaged:
+ handleManagedAttr(S, D, AL);
+ break;
case ParsedAttr::AT_CUDADeviceBuiltinSurfaceType:
handleSimpleAttributeWithExclusions<CUDADeviceBuiltinSurfaceTypeAttr,
CUDADeviceBuiltinTextureTypeAttr>(S, D,
@@ -7058,6 +7880,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AnyX86NoCfCheck:
handleNoCfCheckAttr(S, D, AL);
break;
+ case ParsedAttr::AT_Leaf:
+ handleSimpleAttribute<LeafAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_NoThrow:
if (!AL.isUsedAsTypeAttr())
handleSimpleAttribute<NoThrowAttr>(S, D, AL);
@@ -7098,6 +7923,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_ObjCBoxable:
handleObjCBoxable(S, D, AL);
break;
+ case ParsedAttr::AT_NSErrorDomain:
+ handleNSErrorDomain(S, D, AL);
+ break;
case ParsedAttr::AT_CFAuditedTransfer:
handleSimpleAttributeWithExclusions<CFAuditedTransferAttr,
CFUnknownTransferAttr>(S, D, AL);
@@ -7155,6 +7983,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Packed:
handlePackedAttr(S, D, AL);
break;
+ case ParsedAttr::AT_PreferredName:
+ handlePreferredName(S, D, AL);
+ break;
case ParsedAttr::AT_Section:
handleSectionAttr(S, D, AL);
break;
@@ -7179,9 +8010,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Unavailable:
handleAttrWithMessage<UnavailableAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_Assumption:
+ handleAssumumptionAttr(S, D, AL);
+ break;
case ParsedAttr::AT_ObjCDirect:
handleObjCDirectAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ObjCNonRuntimeProtocol:
+ handleObjCNonRuntimeProtocolAttr(S, D, AL);
+ break;
case ParsedAttr::AT_ObjCDirectMembers:
handleObjCDirectMembersAttr(S, D, AL);
handleSimpleAttribute<ObjCDirectMembersAttr>(S, D, AL);
@@ -7200,6 +8037,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSimpleAttributeWithExclusions<DisableTailCallsAttr, NakedAttr>(S, D,
AL);
break;
+ case ParsedAttr::AT_NoMerge:
+ handleSimpleAttribute<NoMergeAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_Visibility:
handleVisibilityAttr(S, D, AL, false);
break;
@@ -7397,6 +8237,38 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleTypeTagForDatatypeAttr(S, D, AL);
break;
+ // Swift attributes.
+ case ParsedAttr::AT_SwiftAsyncName:
+ handleSwiftAsyncName(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftAttr:
+ handleSwiftAttrAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftBridge:
+ handleSwiftBridge(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftBridgedTypedef:
+ handleSimpleAttribute<SwiftBridgedTypedefAttr>(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftError:
+ handleSwiftError(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftName:
+ handleSwiftName(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftNewType:
+ handleSwiftNewType(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftObjCMembers:
+ handleSimpleAttribute<SwiftObjCMembersAttr>(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftPrivate:
+ handleSimpleAttribute<SwiftPrivateAttr>(S, D, AL);
+ break;
+ case ParsedAttr::AT_SwiftAsync:
+ handleSwiftAsyncAttr(S, D, AL);
+ break;
+
// XRay attributes.
case ParsedAttr::AT_XRayLogArgs:
handleXRayLogArgsAttr(S, D, AL);
@@ -7446,6 +8318,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_UseHandle:
handleHandleAttr<UseHandleAttr>(S, D, AL);
break;
+
+ case ParsedAttr::AT_EnforceTCB:
+ handleEnforceTCBAttr<EnforceTCBAttr, EnforceTCBLeafAttr>(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_EnforceTCBLeaf:
+ handleEnforceTCBAttr<EnforceTCBLeafAttr, EnforceTCBAttr>(S, D, AL);
+ break;
}
}
@@ -7600,8 +8480,8 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
NewFD = FunctionDecl::Create(
FD->getASTContext(), FD->getDeclContext(), Loc, Loc,
DeclarationName(II), FD->getType(), FD->getTypeSourceInfo(), SC_None,
- false /*isInlineSpecified*/, FD->hasPrototype(), CSK_unspecified,
- FD->getTrailingRequiresClause());
+ false /*isInlineSpecified*/, FD->hasPrototype(),
+ ConstexprSpecKind::Unspecified, FD->getTrailingRequiresClause());
NewD = NewFD;
if (FD->getQualifier())
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index 22bf35dbd0cb..8bfaa46162bc 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -655,7 +655,8 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
// contain the constexpr specifier.
if (New->getConstexprKind() != Old->getConstexprKind()) {
Diag(New->getLocation(), diag::err_constexpr_redecl_mismatch)
- << New << New->getConstexprKind() << Old->getConstexprKind();
+ << New << static_cast<int>(New->getConstexprKind())
+ << static_cast<int>(Old->getConstexprKind());
Diag(Old->getLocation(), diag::note_previous_declaration);
Invalid = true;
} else if (!Old->getMostRecentDecl()->isInlined() && New->isInlined() &&
@@ -694,6 +695,17 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
Invalid = true;
}
+ // C++11 [temp.friend]p4 (DR329):
+ // When a function is defined in a friend function declaration in a class
+ // template, the function is instantiated when the function is odr-used.
+ // The same restrictions on multiple declarations and definitions that
+ // apply to non-template function declarations and definitions also apply
+ // to these implicit definitions.
+ const FunctionDecl *OldDefinition = nullptr;
+ if (New->isThisDeclarationInstantiatedFromAFriendDefinition() &&
+ Old->isDefined(OldDefinition, true))
+ CheckForFunctionRedefinition(New, OldDefinition);
+
return Invalid;
}
@@ -723,7 +735,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
Diag(Decomp.getLSquareLoc(),
!getLangOpts().CPlusPlus17
? diag::ext_decomp_decl
- : D.getContext() == DeclaratorContext::ConditionContext
+ : D.getContext() == DeclaratorContext::Condition
? diag::ext_decomp_decl_cond
: diag::warn_cxx14_compat_decomp_decl)
<< Decomp.getSourceRange();
@@ -890,7 +902,8 @@ static bool checkSimpleDecomposition(
llvm::function_ref<ExprResult(SourceLocation, Expr *, unsigned)> GetInit) {
if ((int64_t)Bindings.size() != NumElems) {
S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
- << DecompType << (unsigned)Bindings.size() << NumElems.toString(10)
+ << DecompType << (unsigned)Bindings.size()
+ << (unsigned)NumElems.getLimitedValue(UINT_MAX) << NumElems.toString(10)
<< (NumElems < Bindings.size());
return true;
}
@@ -1066,8 +1079,9 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
TemplateArgumentListInfo &Args;
ICEDiagnoser(LookupResult &R, TemplateArgumentListInfo &Args)
: R(R), Args(Args) {}
- void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
- S.Diag(Loc, diag::err_decomp_decl_std_tuple_size_not_constant)
+ Sema::SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
+ SourceLocation Loc) override {
+ return S.Diag(Loc, diag::err_decomp_decl_std_tuple_size_not_constant)
<< printTemplateArgs(S.Context.getPrintingPolicy(), Args);
}
} Diagnoser(R, Args);
@@ -1077,7 +1091,7 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
if (E.isInvalid())
return IsTupleLike::Error;
- E = S.VerifyIntegerConstantExpression(E.get(), &Size, Diagnoser, false);
+ E = S.VerifyIntegerConstantExpression(E.get(), &Size, Diagnoser);
if (E.isInvalid())
return IsTupleLike::Error;
@@ -1135,8 +1149,9 @@ static bool checkTupleLikeDecomposition(Sema &S,
const llvm::APSInt &TupleSize) {
if ((int64_t)Bindings.size() != TupleSize) {
S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
- << DecompType << (unsigned)Bindings.size() << TupleSize.toString(10)
- << (TupleSize < Bindings.size());
+ << DecompType << (unsigned)Bindings.size()
+ << (unsigned)TupleSize.getLimitedValue(UINT_MAX)
+ << TupleSize.toString(10) << (TupleSize < Bindings.size());
return true;
}
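The reworked diagnostic now carries both a clamped unsigned count and the full decimal string; the situation it reports looks like (illustrative):

    std::tuple<int, int, int> t{1, 2, 3};
    auto [a, b] = t; // err_decomp_decl_wrong_number_bindings:
                     // 2 bindings for a tuple of size 3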
@@ -1184,7 +1199,8 @@ static bool checkTupleLikeDecomposition(Sema &S,
// an xvalue otherwise
if (!Src->getType()->isLValueReferenceType())
E = ImplicitCastExpr::Create(S.Context, E.get()->getType(), CK_NoOp,
- E.get(), nullptr, VK_XValue);
+ E.get(), nullptr, VK_XValue,
+ FPOptionsOverride());
TemplateArgumentListInfo Args(Loc, Loc);
Args.addArgument(
@@ -1248,8 +1264,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
if (E.isInvalid())
return true;
RefVD->setInit(E.get());
- if (!E.get()->isValueDependent())
- RefVD->checkInitIsICE();
+ S.CheckCompleteVariableDeclaration(RefVD);
E = S.BuildDeclarationNameExpr(CXXScopeSpec(),
DeclarationNameInfo(B->getDeclName(), Loc),
@@ -1360,7 +1375,7 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
[](FieldDecl *FD) { return !FD->isUnnamedBitfield(); });
assert(Bindings.size() != NumFields);
S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
- << DecompType << (unsigned)Bindings.size() << NumFields
+ << DecompType << (unsigned)Bindings.size() << NumFields << NumFields
<< (NumFields < Bindings.size());
return true;
};
@@ -1373,11 +1388,23 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
if (FD->isUnnamedBitfield())
continue;
- if (FD->isAnonymousStructOrUnion()) {
- S.Diag(Src->getLocation(), diag::err_decomp_decl_anon_union_member)
- << DecompType << FD->getType()->isUnionType();
- S.Diag(FD->getLocation(), diag::note_declared_at);
- return true;
+ // All the non-static data members are required to be nameable, so they
+ // must all have names.
+ if (!FD->getDeclName()) {
+ if (RD->isLambda()) {
+ S.Diag(Src->getLocation(), diag::err_decomp_decl_lambda);
+ S.Diag(RD->getLocation(), diag::note_lambda_decl);
+ return true;
+ }
+
+ if (FD->isAnonymousStructOrUnion()) {
+ S.Diag(Src->getLocation(), diag::err_decomp_decl_anon_union_member)
+ << DecompType << FD->getType()->isUnionType();
+ S.Diag(FD->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ // FIXME: Are there any other ways we could have an anonymous member?
}
// We have a real field to bind.
@@ -1618,7 +1645,7 @@ static bool CheckConstexprDestructorSubobjects(Sema &SemaRef,
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(DD->getLocation(), diag::err_constexpr_dtor_subobject)
- << DD->getConstexprKind() << !FD
+ << static_cast<int>(DD->getConstexprKind()) << !FD
<< (FD ? FD->getDeclName() : DeclarationName()) << T;
SemaRef.Diag(Loc, diag::note_constexpr_dtor_subobject)
<< !FD << (FD ? FD->getDeclName() : DeclarationName()) << T;
@@ -2588,7 +2615,7 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
Diag(AL.getLoc(), AL.getKind() == ParsedAttr::UnknownAttribute
? (unsigned)diag::warn_unknown_attribute_ignored
: (unsigned)diag::err_base_specifier_attribute)
- << AL;
+ << AL << AL.getRange();
}
TypeSourceInfo *TInfo = nullptr;
@@ -3577,8 +3604,10 @@ namespace {
Base = SubME->getBase();
}
- if (!isa<CXXThisExpr>(Base->IgnoreParenImpCasts()))
+ if (!isa<CXXThisExpr>(Base->IgnoreParenImpCasts())) {
+ Visit(Base);
return;
+ }
if (AddressOf && AllPODFields)
return;
@@ -3905,9 +3934,22 @@ void Sema::ActOnStartTrailingRequiresClause(Scope *S, Declarator &D) {
}
ExprResult Sema::ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr) {
+ return ActOnRequiresClause(ConstraintExpr);
+}
+
+ExprResult Sema::ActOnRequiresClause(ExprResult ConstraintExpr) {
+ if (ConstraintExpr.isInvalid())
+ return ExprError();
+
+ ConstraintExpr = CorrectDelayedTyposInExpr(ConstraintExpr);
if (ConstraintExpr.isInvalid())
return ExprError();
- return CorrectDelayedTyposInExpr(ConstraintExpr);
+
+ if (DiagnoseUnexpandedParameterPack(ConstraintExpr.get(),
+ UPPC_RequiresClause))
+ return ExprError();
+
+ return ConstraintExpr;
}
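The new ActOnRequiresClause additionally rejects unexpanded parameter packs, roughly (hypothetical concept name):

    template <class T> concept Small = sizeof(T) <= 4;

    template <class... Ts>
    void ok() requires (Small<Ts> && ...); // pack expanded: accepted

    template <class... Ts>
    void bad() requires Small<Ts>; // UPPC_RequiresClause: unexpanded 'Ts'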
/// This is invoked after parsing an in-class initializer for a
@@ -5478,8 +5520,9 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
// Bases.
for (const auto &Base : ClassDecl->bases()) {
- // Bases are always records in a well-formed non-dependent class.
const RecordType *RT = Base.getType()->getAs<RecordType>();
+ if (!RT)
+ continue;
// Remember direct virtual bases.
if (Base.isVirtual()) {
@@ -5869,13 +5912,22 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
// The function will be passed to the consumer when its definition is
// encountered.
- } else if (!MD->isTrivial() || MD->isExplicitlyDefaulted() ||
+ } else if (MD->isExplicitlyDefaulted()) {
+ // Synthesize and instantiate explicitly defaulted methods.
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+
+ if (TSK != TSK_ExplicitInstantiationDefinition) {
+ // Except for explicit instantiation defs, we will not see the
+ // definition again later, so pass it to the consumer now.
+ S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
+ }
+ } else if (!MD->isTrivial() ||
MD->isCopyAssignmentOperator() ||
MD->isMoveAssignmentOperator()) {
- // Synthesize and instantiate non-trivial implicit methods, explicitly
- // defaulted methods, and the copy and move assignment operators. The
- // latter are exported even if they are trivial, because the address of
- // an operator can be taken and should compare equal across libraries.
+ // Synthesize and instantiate non-trivial implicit methods, and the copy
+ // and move assignment operators. The latter are exported even if they
+ // are trivial, because the address of an operator can be taken and
+ // should compare equal across libraries.
S.MarkFunctionReferenced(Class->getLocation(), MD);
// There is no later point when we will see the definition of this
@@ -6056,7 +6108,7 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
Attr *ClassAttr = getDLLAttr(Class);
// MSVC inherits DLL attributes to partial class template specializations.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft() && !ClassAttr) {
+ if (Context.getTargetInfo().shouldDLLImportComdatSymbols() && !ClassAttr) {
if (auto *Spec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Class)) {
if (Attr *TemplateAttr =
getDLLAttr(Spec->getSpecializedTemplate()->getTemplatedDecl())) {
@@ -6076,7 +6128,7 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
return;
}
- if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ if (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
!ClassAttr->isInherited()) {
// Diagnose dll attributes on members of class with dll attribute.
for (Decl *Member : Class->decls()) {
@@ -6141,8 +6193,7 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
if (MD->isInlined()) {
// MinGW does not import or export inline methods. But do it for
// template instantiations.
- if (!Context.getTargetInfo().getCXXABI().isMicrosoft() &&
- !Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment() &&
+ if (!Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
TSK != TSK_ExplicitInstantiationDeclaration &&
TSK != TSK_ExplicitInstantiationDefinition)
continue;
@@ -7331,9 +7382,10 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// If a function is explicitly defaulted on its first declaration, it is
// implicitly considered to be constexpr if the implicit declaration
// would be.
- MD->setConstexprKind(
- Constexpr ? (MD->isConsteval() ? CSK_consteval : CSK_constexpr)
- : CSK_unspecified);
+ MD->setConstexprKind(Constexpr ? (MD->isConsteval()
+ ? ConstexprSpecKind::Consteval
+ : ConstexprSpecKind::Constexpr)
+ : ConstexprSpecKind::Unspecified);
if (!Type->hasExceptionSpec()) {
// C++2a [except.spec]p3:
@@ -8022,10 +8074,10 @@ private:
if (ReturnFalse.isInvalid())
return StmtError();
- return S.ActOnIfStmt(Loc, false, nullptr,
+ return S.ActOnIfStmt(Loc, false, Loc, nullptr,
S.ActOnCondition(nullptr, Loc, NotCond.get(),
Sema::ConditionKind::Boolean),
- ReturnFalse.get(), SourceLocation(), nullptr);
+ Loc, ReturnFalse.get(), SourceLocation(), nullptr);
}
StmtResult visitSubobjectArray(QualType Type, llvm::APInt Size,
@@ -8177,9 +8229,9 @@ private:
return StmtError();
// if (...)
- return S.ActOnIfStmt(Loc, /*IsConstexpr=*/false, InitStmt, Cond,
- ReturnStmt.get(), /*ElseLoc=*/SourceLocation(),
- /*Else=*/nullptr);
+ return S.ActOnIfStmt(Loc, /*IsConstexpr=*/false, Loc, InitStmt, Cond, Loc,
+ ReturnStmt.get(),
+ /*ElseLoc=*/SourceLocation(), /*Else=*/nullptr);
}
case DefaultedComparisonKind::NotEqual:
@@ -8212,7 +8264,7 @@ static void lookupOperatorsForDefaultedComparison(Sema &Self, Scope *S,
UnresolvedSetImpl &Operators,
OverloadedOperatorKind Op) {
auto Lookup = [&](OverloadedOperatorKind OO) {
- Self.LookupOverloadedOperatorName(OO, S, QualType(), QualType(), Operators);
+ Self.LookupOverloadedOperatorName(OO, S, Operators);
};
// Every defaulted operator looks up itself.
@@ -8425,7 +8477,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// FIXME: Only applying this to the first declaration seems problematic, as
// simple reorderings can affect the meaning of the program.
if (First && !FD->isConstexpr() && Info.Constexpr)
- FD->setConstexprKind(CSK_constexpr);
+ FD->setConstexprKind(ConstexprSpecKind::Constexpr);
// C++2a [except.spec]p3:
// If a declaration of a function does not have a noexcept-specifier
@@ -9399,7 +9451,8 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
// brace-or-equal-initializer
if (CSM == Sema::CXXDefaultConstructor && FI->hasInClassInitializer()) {
if (Diagnose)
- S.Diag(FI->getLocation(), diag::note_nontrivial_in_class_init) << FI;
+ S.Diag(FI->getLocation(), diag::note_nontrivial_default_member_init)
+ << FI;
return false;
}
@@ -11106,8 +11159,8 @@ QualType Sema::CheckComparisonCategoryType(ComparisonCategoryType Kind,
// Attempt to diagnose reasons why the STL definition of this type
// might be foobar, including it failing to be a constant expression.
// TODO Handle more ways the lookup or result can be invalid.
- if (!VD->isStaticDataMember() || !VD->isConstexpr() || !VD->hasInit() ||
- !VD->checkInitIsICE())
+ if (!VD->isStaticDataMember() ||
+ !VD->isUsableInConstantExpressions(Context))
return UnsupportedSTLError(USS_InvalidMember, MemName, VD);
// Attempt to evaluate the var decl as a constant expression and extract
@@ -12936,7 +12989,8 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
Context, ClassDecl, ClassLoc, NameInfo, /*Type*/ QualType(),
/*TInfo=*/nullptr, ExplicitSpecifier(),
/*isInline=*/true, /*isImplicitlyDeclared=*/true,
- Constexpr ? CSK_constexpr : CSK_unspecified);
+ Constexpr ? ConstexprSpecKind::Constexpr
+ : ConstexprSpecKind::Unspecified);
DefaultCon->setAccess(AS_public);
DefaultCon->setDefaulted();
@@ -13057,7 +13111,7 @@ Sema::findInheritingConstructor(SourceLocation Loc,
Context, Derived, UsingLoc, NameInfo, TInfo->getType(), TInfo,
BaseCtor->getExplicitSpecifier(), /*isInline=*/true,
/*isImplicitlyDeclared=*/true,
- Constexpr ? BaseCtor->getConstexprKind() : CSK_unspecified,
+ Constexpr ? BaseCtor->getConstexprKind() : ConstexprSpecKind::Unspecified,
InheritedConstructor(Shadow, BaseCtor),
BaseCtor->getTrailingRequiresClause());
if (Shadow->isInvalidDecl())
@@ -13214,7 +13268,8 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
CXXDestructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
QualType(), nullptr, /*isInline=*/true,
/*isImplicitlyDeclared=*/true,
- Constexpr ? CSK_constexpr : CSK_unspecified);
+ Constexpr ? ConstexprSpecKind::Constexpr
+ : ConstexprSpecKind::Unspecified);
Destructor->setAccess(AS_public);
Destructor->setDefaulted();
@@ -13849,7 +13904,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CXXMethodDecl *CopyAssignment = CXXMethodDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(),
/*TInfo=*/nullptr, /*StorageClass=*/SC_None,
- /*isInline=*/true, Constexpr ? CSK_constexpr : CSK_unspecified,
+ /*isInline=*/true,
+ Constexpr ? ConstexprSpecKind::Constexpr : ConstexprSpecKind::Unspecified,
SourceLocation());
CopyAssignment->setAccess(AS_public);
CopyAssignment->setDefaulted();
@@ -14174,7 +14230,8 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
CXXMethodDecl *MoveAssignment = CXXMethodDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(),
/*TInfo=*/nullptr, /*StorageClass=*/SC_None,
- /*isInline=*/true, Constexpr ? CSK_constexpr : CSK_unspecified,
+ /*isInline=*/true,
+ Constexpr ? ConstexprSpecKind::Constexpr : ConstexprSpecKind::Unspecified,
SourceLocation());
MoveAssignment->setAccess(AS_public);
MoveAssignment->setDefaulted();
@@ -14558,7 +14615,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
ExplicitSpecifier(),
/*isInline=*/true,
/*isImplicitlyDeclared=*/true,
- Constexpr ? CSK_constexpr : CSK_unspecified);
+ Constexpr ? ConstexprSpecKind::Constexpr
+ : ConstexprSpecKind::Unspecified);
CopyConstructor->setAccess(AS_public);
CopyConstructor->setDefaulted();
@@ -14691,7 +14749,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
ExplicitSpecifier(),
/*isInline=*/true,
/*isImplicitlyDeclared=*/true,
- Constexpr ? CSK_constexpr : CSK_unspecified);
+ Constexpr ? ConstexprSpecKind::Constexpr
+ : ConstexprSpecKind::Unspecified);
MoveConstructor->setAccess(AS_public);
MoveConstructor->setDefaulted();
@@ -14793,9 +14852,13 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
SynthesizedFunctionScope Scope(*this, Conv);
assert(!Conv->getReturnType()->isUndeducedType());
+ QualType ConvRT = Conv->getType()->getAs<FunctionType>()->getReturnType();
+ CallingConv CC =
+ ConvRT->getPointeeType()->getAs<FunctionType>()->getCallConv();
+
CXXRecordDecl *Lambda = Conv->getParent();
FunctionDecl *CallOp = Lambda->getLambdaCallOperator();
- FunctionDecl *Invoker = Lambda->getLambdaStaticInvoker();
+ FunctionDecl *Invoker = Lambda->getLambdaStaticInvoker(CC);
if (auto *TemplateArgs = Conv->getTemplateSpecializationArgs()) {
CallOp = InstantiateFunctionDeclaration(
@@ -14866,9 +14929,9 @@ void Sema::DefineImplicitLambdaToBlockPointerConversion(
// (since it's unusable otherwise); in the case where we inline the
// block literal, it has block literal lifetime semantics.
if (!BuildBlock.isInvalid() && !getLangOpts().ObjCAutoRefCount)
- BuildBlock = ImplicitCastExpr::Create(Context, BuildBlock.get()->getType(),
- CK_CopyAndAutoreleaseBlockObject,
- BuildBlock.get(), nullptr, VK_RValue);
+ BuildBlock = ImplicitCastExpr::Create(
+ Context, BuildBlock.get()->getType(), CK_CopyAndAutoreleaseBlockObject,
+ BuildBlock.get(), nullptr, VK_RValue, FPOptionsOverride());
if (BuildBlock.isInvalid()) {
Diag(CurrentLocation, diag::note_lambda_to_block_conv);
@@ -15032,24 +15095,14 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
DeclContext::lookup_result Lookup =
ClassPattern->lookup(Field->getDeclName());
- // Lookup can return at most two results: the pattern for the field, or the
- // injected class name of the parent record. No other member can have the
- // same name as the field.
- // In modules mode, lookup can return multiple results (coming from
- // different modules).
- assert((getLangOpts().Modules || (!Lookup.empty() && Lookup.size() <= 2)) &&
- "more than two lookup results for field name");
- FieldDecl *Pattern = dyn_cast<FieldDecl>(Lookup[0]);
- if (!Pattern) {
- assert(isa<CXXRecordDecl>(Lookup[0]) &&
- "cannot have other non-field member with same name");
- for (auto L : Lookup)
- if (isa<FieldDecl>(L)) {
- Pattern = cast<FieldDecl>(L);
- break;
- }
- assert(Pattern && "We must have set the Pattern!");
+ FieldDecl *Pattern = nullptr;
+ for (auto L : Lookup) {
+ if (isa<FieldDecl>(L)) {
+ Pattern = cast<FieldDecl>(L);
+ break;
+ }
}
+ assert(Pattern && "We must have set the Pattern!");
if (!Pattern->hasInClassInitializer() ||
InstantiateInClassInitializer(Loc, Field, Pattern,
@@ -15076,9 +15129,10 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
// constructor before the initializer is lexically complete will ultimately
// come here at which point we can diagnose it.
RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext();
- Diag(Loc, diag::err_in_class_initializer_not_yet_parsed)
+ Diag(Loc, diag::err_default_member_initializer_not_yet_parsed)
<< OutermostClass << Field;
- Diag(Field->getEndLoc(), diag::note_in_class_initializer_not_yet_parsed);
+ Diag(Field->getEndLoc(),
+ diag::note_default_member_initializer_not_yet_parsed);
// Recover by marking the field invalid, unless we're in a SFINAE context.
if (!isSFINAEContext())
Field->setInvalidDecl();
@@ -15503,6 +15557,18 @@ checkLiteralOperatorTemplateParameterList(Sema &SemaRef,
SemaRef.Context.hasSameType(PmDecl->getType(), SemaRef.Context.CharTy))
return false;
+ // C++20 [over.literal]p5:
+ // A string literal operator template is a literal operator template
+ // whose template-parameter-list comprises a single non-type
+ // template-parameter of class type.
+ //
+ // As a DR resolution, we also allow placeholders for deduced class
+ // template specializations.
+ if (SemaRef.getLangOpts().CPlusPlus20 &&
+ !PmDecl->isTemplateParameterPack() &&
+ (PmDecl->getType()->isRecordType() ||
+ PmDecl->getType()->getAs<DeducedTemplateSpecializationType>()))
+ return false;
} else if (TemplateParams->size() == 2) {
TemplateTypeParmDecl *PmType =
dyn_cast<TemplateTypeParmDecl>(TemplateParams->getParam(0));
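The C++20 allowance added above (a single class-type non-type template parameter, possibly a deduced placeholder) is what enables string literal operator templates, e.g. this sketch:

    #include <cstddef>

    template <std::size_t N> struct FixedString {
      char data[N]{};
      constexpr FixedString(const char (&s)[N]) {
        for (std::size_t i = 0; i != N; ++i) data[i] = s[i];
      }
    };

    template <FixedString S> // one class-type NTTP: now valid
    constexpr auto operator""_fs() { return S; }

    constexpr auto greeting = "hello"_fs;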
@@ -15559,6 +15625,8 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
// template <char...> type operator "" name() and
// template <class T, T...> type operator "" name() are the only valid
// template signatures, and the only valid signatures with no parameters.
+ //
+ // C++20 also allows template <SomeClass T> type operator "" name().
if (TpDecl) {
if (FnDecl->param_size() != 0) {
Diag(FnDecl->getLocation(),
@@ -15988,9 +16056,10 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
AssertExpr = FullAssertExpr.get();
llvm::APSInt Cond;
- if (!Failed && VerifyIntegerConstantExpression(AssertExpr, &Cond,
- diag::err_static_assert_expression_is_not_constant,
- /*AllowFold=*/false).isInvalid())
+ if (!Failed && VerifyIntegerConstantExpression(
+ AssertExpr, &Cond,
+ diag::err_static_assert_expression_is_not_constant)
+ .isInvalid())
Failed = true;
if (!Failed && !Cond) {
@@ -16282,7 +16351,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
// for a TUK_Friend.
- Declarator TheDeclarator(DS, DeclaratorContext::MemberContext);
+ Declarator TheDeclarator(DS, DeclaratorContext::Member);
TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S);
QualType T = TSI->getType();
if (TheDeclarator.isInvalidType())
diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp
index d376880a40e8..60253a82e93a 100644
--- a/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/clang/lib/Sema/SemaDeclObjC.cpp
@@ -1066,6 +1066,11 @@ Decl *Sema::ActOnStartClassInterface(
ProcessDeclAttributeList(TUScope, IDecl, AttrList);
AddPragmaAttributes(TUScope, IDecl);
+
+ // Merge attributes from previous declarations.
+ if (PrevIDecl)
+ mergeDeclAttributes(IDecl, PrevIDecl);
+
PushOnScopeChains(IDecl, TUScope);
// Start the definition of this class. If we're in a redefinition case, there
@@ -1581,7 +1586,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
DS.SetRangeEnd(loc);
// Form the declarator.
- Declarator D(DS, DeclaratorContext::TypeNameContext);
+ Declarator D(DS, DeclaratorContext::TypeName);
// If we have a typedef of an Objective-C class type that is missing a '*',
// add the '*'.
@@ -2122,7 +2127,12 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
// Add ivar's to class's DeclContext.
for (unsigned i = 0, e = numIvars; i != e; ++i) {
ivars[i]->setLexicalDeclContext(ImpDecl);
- IDecl->makeDeclVisibleInContext(ivars[i]);
+ // In a 'fragile' runtime the ivar was added to the implicit
+ // ObjCInterfaceDecl, while in a 'non-fragile' runtime the ivar is
+ // only in the ObjCImplementationDecl. In the non-fragile case the ivar
+ // therefore also needs to be propagated to the ObjCInterfaceDecl.
+ if (!LangOpts.ObjCRuntime.isFragile())
+ IDecl->makeDeclVisibleInContext(ivars[i]);
ImpDecl->addDecl(ivars[i]);
}
@@ -3120,6 +3130,9 @@ Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
IdentLocs[i]);
IDecl->setAtEndRange(IdentLocs[i]);
+ if (PrevIDecl)
+ mergeDeclAttributes(IDecl, PrevIDecl);
+
PushOnScopeChains(IDecl, TUScope);
CheckObjCDeclScope(IDecl);
DeclsInGroup.push_back(IDecl);
@@ -3899,6 +3912,55 @@ static void DiagnoseVariableSizedIvars(Sema &S, ObjCContainerDecl *OCD) {
}
}
+static void DiagnoseCategoryDirectMembersProtocolConformance(
+ Sema &S, ObjCProtocolDecl *PDecl, ObjCCategoryDecl *CDecl);
+
+static void DiagnoseCategoryDirectMembersProtocolConformance(
+ Sema &S, ObjCCategoryDecl *CDecl,
+ const llvm::iterator_range<ObjCProtocolList::iterator> &Protocols) {
+ for (auto *PI : Protocols)
+ DiagnoseCategoryDirectMembersProtocolConformance(S, PI, CDecl);
+}
+
+static void DiagnoseCategoryDirectMembersProtocolConformance(
+ Sema &S, ObjCProtocolDecl *PDecl, ObjCCategoryDecl *CDecl) {
+ if (!PDecl->isThisDeclarationADefinition() && PDecl->getDefinition())
+ PDecl = PDecl->getDefinition();
+
+ llvm::SmallVector<const Decl *, 4> DirectMembers;
+ const auto *IDecl = CDecl->getClassInterface();
+ for (auto *MD : PDecl->methods()) {
+ if (!MD->isPropertyAccessor()) {
+ if (const auto *CMD =
+ IDecl->getMethod(MD->getSelector(), MD->isInstanceMethod())) {
+ if (CMD->isDirectMethod())
+ DirectMembers.push_back(CMD);
+ }
+ }
+ }
+ for (auto *PD : PDecl->properties()) {
+ if (const auto *CPD = IDecl->FindPropertyVisibleInPrimaryClass(
+ PD->getIdentifier(),
+ PD->isClassProperty()
+ ? ObjCPropertyQueryKind::OBJC_PR_query_class
+ : ObjCPropertyQueryKind::OBJC_PR_query_instance)) {
+ if (CPD->isDirectProperty())
+ DirectMembers.push_back(CPD);
+ }
+ }
+ if (!DirectMembers.empty()) {
+ S.Diag(CDecl->getLocation(), diag::err_objc_direct_protocol_conformance)
+ << CDecl->IsClassExtension() << CDecl << PDecl << IDecl;
+ for (const auto *MD : DirectMembers)
+ S.Diag(MD->getLocation(), diag::note_direct_member_here);
+ return;
+ }
+
+ // Check this protocol's referenced protocols, recursively.
+ DiagnoseCategoryDirectMembersProtocolConformance(S, CDecl,
+ PDecl->protocols());
+}
+
// Note: For class/category implementations, allMethods is always null.
Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
ArrayRef<DeclGroupPtrTy> allTUVars) {
@@ -3922,15 +3984,11 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
if (auto *OID = dyn_cast<ObjCImplementationDecl>(CurContext)) {
for (auto PropImpl : OID->property_impls()) {
if (auto *Getter = PropImpl->getGetterMethodDecl())
- if (Getter->isSynthesizedAccessorStub()) {
- OID->makeDeclVisibleInContext(Getter);
+ if (Getter->isSynthesizedAccessorStub())
OID->addDecl(Getter);
- }
if (auto *Setter = PropImpl->getSetterMethodDecl())
- if (Setter->isSynthesizedAccessorStub()) {
- OID->makeDeclVisibleInContext(Setter);
+ if (Setter->isSynthesizedAccessorStub())
OID->addDecl(Setter);
- }
}
}
@@ -4003,6 +4061,8 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
ObjCInterfaceDecl *CCPrimary = C->getClassInterface();
DiagnoseClassExtensionDupMethods(C, CCPrimary);
}
+
+ DiagnoseCategoryDirectMembersProtocolConformance(*this, C, C->protocols());
}
if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(ClassDecl)) {
if (CDecl->getIdentifier())
diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index d7695f9d7d7a..851e28741e49 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -99,9 +99,7 @@ ExprResult Sema::ActOnNoexceptSpec(SourceLocation NoexceptLoc,
llvm::APSInt Result;
Converted = VerifyIntegerConstantExpression(
- Converted.get(), &Result,
- diag::err_noexcept_needs_constant_expression,
- /*AllowFold*/ false);
+ Converted.get(), &Result, diag::err_noexcept_needs_constant_expression);
if (!Converted.isInvalid())
EST = !Result ? EST_NoexceptFalse : EST_NoexceptTrue;
return Converted;
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index ccae79636f32..45616dadcbee 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -24,10 +24,10 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
-#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -94,7 +94,7 @@ static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) {
A->getSemanticSpelling() != UnusedAttr::C2x_maybe_unused) {
const Decl *DC = cast_or_null<Decl>(S.getCurObjCLexicalContext());
if (DC && !DC->hasAttr<UnusedAttr>())
- S.Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
+ S.Diag(Loc, diag::warn_used_but_marked_unused) << D;
}
}
}
@@ -339,11 +339,10 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
// List-items in map clauses on this construct may only refer to the declared
// variable var and entities that could be referenced by a procedure defined
// at the same location
- auto *DMD = dyn_cast<OMPDeclareMapperDecl>(CurContext);
- if (LangOpts.OpenMP && DMD && !CurContext->containsDecl(D) &&
- isa<VarDecl>(D)) {
+ if (LangOpts.OpenMP && isa<VarDecl>(D) &&
+ !isOpenMPDeclareMapperVarDeclAllowed(cast<VarDecl>(D))) {
Diag(Loc, diag::err_omp_declare_mapper_wrong_var)
- << DMD->getVarName().getAsString();
+ << getOpenMPDeclareMapperVarName();
Diag(D->getLocation(), diag::note_entity_declared_at) << D;
return true;
}
@@ -355,6 +354,24 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
+ // CUDA/HIP: Diagnose invalid references to host global variables in device
+ // functions. References to device global variables in host functions are
+ // allowed through shadow variables and are therefore not diagnosed.
+ if (LangOpts.CUDAIsDevice) {
+ auto *FD = dyn_cast_or_null<FunctionDecl>(CurContext);
+ auto Target = IdentifyCUDATarget(FD);
+ if (FD && Target != CFT_Host) {
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (VD && VD->hasGlobalStorage() && !VD->hasAttr<CUDADeviceAttr>() &&
+ !VD->hasAttr<CUDAConstantAttr>() && !VD->hasAttr<CUDASharedAttr>() &&
+ !VD->getType()->isCUDADeviceBuiltinSurfaceType() &&
+ !VD->getType()->isCUDADeviceBuiltinTextureType() &&
+ !VD->isConstexpr() && !VD->getType().isConstQualified())
+ targetDiag(*Locs.begin(), diag::err_ref_bad_target)
+ << /*host*/ 2 << /*variable*/ 1 << VD << Target;
+ }
+ }
+
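The effect of the new check, roughly (hypothetical CUDA translation unit; __attribute__((device)) is the spelling behind the usual __device__ macro):

    int host_global;           // plain host variable
    const int host_const = 42; // const-qualified: exempt from the check

    __attribute__((device)) int dev_fn(void) {
      return host_global + host_const; // err_ref_bad_target for
                                       // 'host_global' only
    }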
if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)) {
if (const auto *VD = dyn_cast<ValueDecl>(D))
checkDeviceDecl(VD, Loc);
@@ -697,7 +714,8 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
// C++ [conv.lval]p3:
// If T is cv std::nullptr_t, the result is a null pointer constant.
CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue;
- Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_RValue);
+ Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_RValue,
+ CurFPFeatureOverrides());
// C11 6.3.2.1p2:
// ... if the lvalue has atomic type, the value has the non-atomic version
@@ -705,7 +723,7 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
if (const AtomicType *Atomic = T->getAs<AtomicType>()) {
T = Atomic->getValueType().getUnqualifiedType();
Res = ImplicitCastExpr::Create(Context, T, CK_AtomicToNonAtomic, Res.get(),
- nullptr, VK_RValue);
+ nullptr, VK_RValue, FPOptionsOverride());
}
return Res;
@@ -1126,6 +1144,17 @@ static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
bool LHSFloat = LHSType->isRealFloatingType();
bool RHSFloat = RHSType->isRealFloatingType();
+ // N1169 4.1.4: If one of the operands has a floating type and the other
+ // operand has a fixed-point type, the fixed-point operand
+ // is converted to the floating type [...]
+ if (LHSType->isFixedPointType() || RHSType->isFixedPointType()) {
+ if (LHSFloat)
+ RHS = S.ImpCastExprToType(RHS.get(), LHSType, CK_FixedPointToFloating);
+ else if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.get(), RHSType, CK_FixedPointToFloating);
+ return LHSFloat ? LHSType : RHSType;
+ }
+
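Per N1169, mixing fixed-point and floating operands converts the fixed-point side (illustrative; requires Clang's -ffixed-point C extension):

    _Accum a = 1.5k;  // fixed-point operand
    float  f = 0.25f;
    float  r = a + f; // 'a' converts via CK_FixedPointToFloating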
// If we have two real floating types, convert the smaller operand
// to the bigger result.
if (LHSFloat && RHSFloat) {
@@ -1152,8 +1181,8 @@ static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
}
assert(RHSFloat);
return handleIntToFloatConversion(S, RHS, LHS, RHSType, LHSType,
- /*convertInt=*/ true,
- /*convertFloat=*/!IsCompAssign);
+ /*ConvertFloat=*/ true,
+ /*ConvertInt=*/!IsCompAssign);
}
/// Diagnose attempts to convert between __float128 and long double if
@@ -1747,7 +1776,7 @@ static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope,
LookupResult R(S, OpName, UDSuffixLoc, Sema::LookupOrdinaryName);
if (S.LookupLiteralOperator(Scope, R, llvm::makeArrayRef(ArgTy, Args.size()),
/*AllowRaw*/ false, /*AllowTemplate*/ false,
- /*AllowStringTemplate*/ false,
+ /*AllowStringTemplatePack*/ false,
/*DiagnoseMissing*/ true) == Sema::LOLR_Error)
return ExprError();
@@ -1852,9 +1881,9 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName);
switch (LookupLiteralOperator(UDLScope, R, ArgTy,
- /*AllowRaw*/ false, /*AllowTemplate*/ false,
- /*AllowStringTemplate*/ true,
- /*DiagnoseMissing*/ true)) {
+ /*AllowRaw*/ false, /*AllowTemplate*/ true,
+ /*AllowStringTemplatePack*/ true,
+ /*DiagnoseMissing*/ true, Lit)) {
case LOLR_Cooked: {
llvm::APInt Len(Context.getIntWidth(SizeType), Literal.GetNumStringChars());
@@ -1865,7 +1894,16 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
return BuildLiteralOperatorCall(R, OpNameInfo, Args, StringTokLocs.back());
}
- case LOLR_StringTemplate: {
+ case LOLR_Template: {
+ TemplateArgumentListInfo ExplicitArgs;
+ TemplateArgument Arg(Lit);
+ TemplateArgumentLocInfo ArgInfo(Lit);
+ ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
+ return BuildLiteralOperatorCall(R, OpNameInfo, None, StringTokLocs.back(),
+ &ExplicitArgs);
+ }
+
+ case LOLR_StringTemplatePack: {
TemplateArgumentListInfo ExplicitArgs;
unsigned CharBits = Context.getIntWidth(CharTy);
@@ -1886,7 +1924,6 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
&ExplicitArgs);
}
case LOLR_Raw:
- case LOLR_Template:
case LOLR_ErrorNoDiagnostic:
llvm_unreachable("unexpected literal operator lookup result");
case LOLR_Error:
@@ -1915,6 +1952,35 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
TemplateArgs);
}
+// CUDA/HIP: Check whether a captured reference variable is referencing a
+// host variable in a device or host device lambda.
+static bool isCapturingReferenceToHostVarInCUDADeviceLambda(const Sema &S,
+ VarDecl *VD) {
+ if (!S.getLangOpts().CUDA || !VD->hasInit())
+ return false;
+ assert(VD->getType()->isReferenceType());
+
+ // Check whether the reference variable is referencing a host variable.
+ auto *DRE = dyn_cast<DeclRefExpr>(VD->getInit());
+ if (!DRE)
+ return false;
+ auto *Referee = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!Referee || !Referee->hasGlobalStorage() ||
+ Referee->hasAttr<CUDADeviceAttr>())
+ return false;
+
+ // Check whether the current function is a device or host device lambda.
+ // Check whether the reference variable is a capture by getDeclContext()
+ // since refersToEnclosingVariableOrCapture() is not ready at this point.
+ auto *MD = dyn_cast_or_null<CXXMethodDecl>(S.CurContext);
+ if (MD && MD->getParent()->isLambda() &&
+ MD->getOverloadedOperator() == OO_Call && MD->hasAttr<CUDADeviceAttr>() &&
+ VD->getDeclContext() != MD)
+ return true;
+
+ return false;
+}
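A rough sketch of the case this helper detects (assuming Clang's extended device lambdas): a reference usable in constant expressions would normally be NOUR_Constant and never captured, but on the device the referee's value must be copied into the capture.

    int global; // host variable with global storage

    void host_fn(void) {
      constexpr int &ref = global; // usable in constant expressions
      auto l = [=] __attribute__((device)) () { return ref; };
    }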
+
NonOdrUseReason Sema::getNonOdrUseReasonInCurrentContext(ValueDecl *D) {
// A declaration named in an unevaluated operand never constitutes an odr-use.
if (isUnevaluatedContext())
@@ -1924,9 +1990,16 @@ NonOdrUseReason Sema::getNonOdrUseReasonInCurrentContext(ValueDecl *D) {
// A variable x whose name appears as a potentially-evaluated expression e
// is odr-used by e unless [...] x is a reference that is usable in
// constant expressions.
+ // CUDA/HIP:
+ // If a reference variable referencing a host variable is captured in a
+ // device or host device lambda, the value of the referee must be copied
+ // to the capture and the reference variable must be treated as odr-used,
+ // since the value of the referee is not known at compile time and must
+ // be loaded from the capture.
if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->getType()->isReferenceType() &&
!(getLangOpts().OpenMP && isOpenMPCapturedDecl(D)) &&
+ !isCapturingReferenceToHostVarInCUDADeviceLambda(*this, VD) &&
VD->isUsableInConstantExpressions(Context))
return NOUR_Constant;
}
@@ -2061,6 +2134,73 @@ static void emitEmptyLookupTypoDiagnostic(
SemaRef.PDiag(NoteID));
}
+/// Diagnose a lookup that found results in an enclosing class during error
+/// recovery. This usually indicates that the results were found in a dependent
+/// base class that could not be searched as part of a template definition.
+/// Always issues a diagnostic (though this may be only a warning in MS
+/// compatibility mode).
+///
+/// Return \c true if the error is unrecoverable, or \c false if the caller
+/// should attempt to recover using these lookup results.
+bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
+ // During a default argument instantiation the CurContext points
+ // to a CXXMethodDecl; but we can't apply a this-> fixit inside a
+ // function parameter list, hence add an explicit check.
+ bool isDefaultArgument =
+ !CodeSynthesisContexts.empty() &&
+ CodeSynthesisContexts.back().Kind ==
+ CodeSynthesisContext::DefaultFunctionArgumentInstantiation;
+ CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
+ bool isInstance = CurMethod && CurMethod->isInstance() &&
+ R.getNamingClass() == CurMethod->getParent() &&
+ !isDefaultArgument;
+
+ // There are two ways we can find a class-scope declaration during template
+ // instantiation that we did not find in the template definition: if it is a
+ // member of a dependent base class, or if it is declared after the point of
+ // use in the same class. Distinguish these by comparing the class in which
+ // the member was found to the naming class of the lookup.
+ unsigned DiagID = diag::err_found_in_dependent_base;
+ unsigned NoteID = diag::note_member_declared_at;
+ if (R.getRepresentativeDecl()->getDeclContext()->Equals(R.getNamingClass())) {
+ DiagID = getLangOpts().MSVCCompat ? diag::ext_found_later_in_class
+ : diag::err_found_later_in_class;
+ } else if (getLangOpts().MSVCCompat) {
+ DiagID = diag::ext_found_in_dependent_base;
+ NoteID = diag::note_dependent_member_use;
+ }
+
+ if (isInstance) {
+ // Give a code modification hint to insert 'this->'.
+ Diag(R.getNameLoc(), DiagID)
+ << R.getLookupName()
+ << FixItHint::CreateInsertion(R.getNameLoc(), "this->");
+ CheckCXXThisCapture(R.getNameLoc());
+ } else {
+ // FIXME: Add a FixItHint to insert 'Base::' or 'Derived::' (assuming
+ // they're not shadowed).
+ Diag(R.getNameLoc(), DiagID) << R.getLookupName();
+ }
+
+ for (NamedDecl *D : R)
+ Diag(D->getLocation(), NoteID);
+
+ // Return true if we are inside a default argument instantiation
+ // and the found name refers to an instance member function, otherwise
+ // the caller will try to create an implicit member call and this is wrong
+ // for default arguments.
+ //
+ // FIXME: Is this special case necessary? We could allow the caller to
+ // diagnose this.
+ if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) {
+ Diag(R.getNameLoc(), diag::err_member_call_without_object);
+ return true;
+ }
+
+ // Tell the callee to try to recover.
+ return false;
+}
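The classic situation this factored-out diagnostic handles (a sketch):

    template <typename T> struct Base { int member; };

    template <typename T> struct Derived : Base<T> {
      // err_found_in_dependent_base, with a fix-it suggesting
      // 'return this->member;' (a warning under -fms-compatibility).
      int get() { return member; }
    };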
+
/// Diagnose an empty lookup.
///
/// \return false if new lookup candidates were found
@@ -2092,46 +2232,20 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// Don't give errors about ambiguities in this lookup.
R.suppressDiagnostics();
- // During a default argument instantiation the CurContext points
- // to a CXXMethodDecl; but we can't apply a this-> fixit inside a
- // function parameter list, hence add an explicit check.
- bool isDefaultArgument =
- !CodeSynthesisContexts.empty() &&
- CodeSynthesisContexts.back().Kind ==
- CodeSynthesisContext::DefaultFunctionArgumentInstantiation;
- CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
- bool isInstance = CurMethod &&
- CurMethod->isInstance() &&
- DC == CurMethod->getParent() && !isDefaultArgument;
-
- // Give a code modification hint to insert 'this->'.
- // TODO: fixit for inserting 'Base<T>::' in the other cases.
- // Actually quite difficult!
- if (getLangOpts().MSVCCompat)
- diagnostic = diag::ext_found_via_dependent_bases_lookup;
- if (isInstance) {
- Diag(R.getNameLoc(), diagnostic) << Name
- << FixItHint::CreateInsertion(R.getNameLoc(), "this->");
- CheckCXXThisCapture(R.getNameLoc());
- } else {
- Diag(R.getNameLoc(), diagnostic) << Name;
- }
-
- // Do we really want to note all of these?
- for (NamedDecl *D : R)
- Diag(D->getLocation(), diag::note_dependent_var_use);
-
- // Return true if we are inside a default argument instantiation
- // and the found name refers to an instance member function, otherwise
- // the function calling DiagnoseEmptyLookup will try to create an
- // implicit member call and this is wrong for default argument.
- if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) {
- Diag(R.getNameLoc(), diag::err_member_call_without_object);
- return true;
+ // If there's a best viable function among the results, only mention
+ // that one in the notes.
+ OverloadCandidateSet Candidates(R.getNameLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ AddOverloadedCallCandidates(R, ExplicitTemplateArgs, Args, Candidates);
+ OverloadCandidateSet::iterator Best;
+ if (Candidates.BestViableFunction(*this, R.getNameLoc(), Best) ==
+ OR_Success) {
+ R.clear();
+ R.addDecl(Best->FoundDecl.getDecl(), Best->FoundDecl.getAccess());
+ R.resolveKind();
}
- // Tell the callee to try to recover.
- return false;
+ return DiagnoseDependentMemberLookup(R);
}
R.clear();
@@ -2577,6 +2691,13 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
NameInfo, /*TemplateArgs=*/nullptr);
if (R.empty()) {
+ // Don't diagnose problems with an invalid record decl; the secondary
+ // no_member diagnostic during template instantiation is likely bogus,
+ // e.g. if a class is invalid because it's derived from an invalid base
+ // class, then missing members were likely supposed to be inherited.
+ if (const auto *CD = dyn_cast<CXXRecordDecl>(DC))
+ if (CD->isInvalidDecl())
+ return ExprError();
Diag(NameInfo.getLoc(), diag::err_no_member)
<< NameInfo.getName() << DC << SS.getRange();
return ExprError();
@@ -2730,8 +2851,7 @@ ExprResult Sema::BuildIvarRefExpr(Scope *S, SourceLocation Loc,
// turn this into Self->ivar, just return a BareIVarExpr or something.
IdentifierInfo &II = Context.Idents.get("self");
UnqualifiedId SelfName;
- SelfName.setIdentifier(&II, SourceLocation());
- SelfName.setKind(UnqualifiedIdKind::IK_ImplicitSelfParam);
+ SelfName.setImplicitSelfParam(&II);
CXXScopeSpec SelfScopeSpec;
SourceLocation TemplateKWLoc;
ExprResult SelfExpr =
@@ -2792,21 +2912,24 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
/// Cast a base object to a member's actual type.
///
-/// Logically this happens in three phases:
+/// There are two relevant checks:
+///
+/// C++ [class.access.base]p7:
///
-/// * First we cast from the base type to the naming class.
-/// The naming class is the class into which we were looking
-/// when we found the member; it's the qualifier type if a
-/// qualifier was provided, and otherwise it's the base type.
+/// If a class member access operator [...] is used to access a non-static
+/// data member or non-static member function, the reference is ill-formed if
+/// the left operand [...] cannot be implicitly converted to a pointer to the
+/// naming class of the right operand.
///
-/// * Next we cast from the naming class to the declaring class.
-/// If the member we found was brought into a class's scope by
-/// a using declaration, this is that class; otherwise it's
-/// the class declaring the member.
+/// C++ [expr.ref]p7:
///
-/// * Finally we cast from the declaring class to the "true"
-/// declaring class of the member. This conversion does not
-/// obey access control.
+/// If E2 is a non-static data member or a non-static member function, the
+/// program is ill-formed if the class of which E2 is directly a member is an
+/// ambiguous base (11.8) of the naming class (11.9.3) of E2.
+///
+/// Note that the latter check does not consider access; the access of the
+/// "real" base class is checked as appropriate when checking the access of the
+/// member name.
ExprResult
Sema::PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
@@ -2930,45 +3053,10 @@ Sema::PerformObjectMemberConversion(Expr *From,
}
}
- bool IgnoreAccess = false;
-
- // If we actually found the member through a using declaration, cast
- // down to the using declaration's type.
- //
- // Pointer equality is fine here because only one declaration of a
- // class ever has member declarations.
- if (FoundDecl->getDeclContext() != Member->getDeclContext()) {
- assert(isa<UsingShadowDecl>(FoundDecl));
- QualType URecordType = Context.getTypeDeclType(
- cast<CXXRecordDecl>(FoundDecl->getDeclContext()));
-
- // We only need to do this if the naming-class to declaring-class
- // conversion is non-trivial.
- if (!Context.hasSameUnqualifiedType(FromRecordType, URecordType)) {
- assert(IsDerivedFrom(FromLoc, FromRecordType, URecordType));
- CXXCastPath BasePath;
- if (CheckDerivedToBaseConversion(FromRecordType, URecordType,
- FromLoc, FromRange, &BasePath))
- return ExprError();
-
- QualType UType = URecordType;
- if (PointerConversions)
- UType = Context.getPointerType(UType);
- From = ImpCastExprToType(From, UType, CK_UncheckedDerivedToBase,
- VK, &BasePath).get();
- FromType = UType;
- FromRecordType = URecordType;
- }
-
- // We don't do access control for the conversion from the
- // declaring class to the true declaring class.
- IgnoreAccess = true;
- }
-
CXXCastPath BasePath;
if (CheckDerivedToBaseConversion(FromRecordType, DestRecordType,
FromLoc, FromRange, &BasePath,
- IgnoreAccess))
+ /*IgnoreAccess=*/true))
return ExprError();
return ImpCastExprToType(From, DestType, CK_UncheckedDerivedToBase,
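// An illustrative sketch (hypothetical code) of the two rules quoted in the
// comment above:
struct A { int n; };
struct B : A { };
struct C : A { };
struct D : B, C { };
int pick(D &d) { return d.B::n; } // OK: naming class is B; A is unambiguous in B
// 'd.n' would be ill-formed: A is an ambiguous base of the naming class D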
@@ -3208,6 +3296,17 @@ ExprResult Sema::BuildDeclarationNameExpr(
break;
}
+ // [expr.prim.id.unqual]p2:
+ // If the entity is a template parameter object for a template
+ // parameter of type T, the type of the expression is const T.
+ // [...] The expression is an lvalue if the entity is a [...] template
+ // parameter object.
+ if (type->isRecordType()) {
+ type = type.getUnqualifiedType().withConst();
+ valueKind = VK_LValue;
+ break;
+ }
+
// For non-references, we need to strip qualifiers just in case
// the template parameter was declared as 'const int' or whatever.
valueKind = VK_RValue;
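// A hedged sketch of the C++20 case handled above: a class-type non-type
// template parameter names a "template parameter object" of type 'const T',
// and the id-expression is an lvalue, so its address can be taken.
struct Pair { int a, b; };
template <Pair P> const int *first() { return &P.a; } // P: lvalue of type 'const Pair'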
@@ -3307,8 +3406,9 @@ ExprResult Sema::BuildDeclarationNameExpr(
case Decl::MSProperty:
case Decl::MSGuid:
- // FIXME: Should MSGuidDecl be subject to capture in OpenMP,
- // or duplicated between host and device?
+ case Decl::TemplateParamObject:
+ // FIXME: Should MSGuidDecl and template parameter objects be subject to
+ // capture in OpenMP, or duplicated between host and device?
valueKind = VK_LValue;
break;
@@ -3409,70 +3509,6 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
}
-static std::pair<QualType, StringLiteral *>
-GetUniqueStableNameInfo(ASTContext &Context, QualType OpType,
- SourceLocation OpLoc, PredefinedExpr::IdentKind K) {
- std::pair<QualType, StringLiteral*> Result{{}, nullptr};
-
- if (OpType->isDependentType()) {
- Result.first = Context.DependentTy;
- return Result;
- }
-
- std::string Str = PredefinedExpr::ComputeName(Context, K, OpType);
- llvm::APInt Length(32, Str.length() + 1);
- Result.first =
- Context.adjustStringLiteralBaseType(Context.CharTy.withConst());
- Result.first = Context.getConstantArrayType(
- Result.first, Length, nullptr, ArrayType::Normal, /*IndexTypeQuals*/ 0);
- Result.second = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
- /*Pascal*/ false, Result.first, OpLoc);
- return Result;
-}
-
-ExprResult Sema::BuildUniqueStableName(SourceLocation OpLoc,
- TypeSourceInfo *Operand) {
- QualType ResultTy;
- StringLiteral *SL;
- std::tie(ResultTy, SL) = GetUniqueStableNameInfo(
- Context, Operand->getType(), OpLoc, PredefinedExpr::UniqueStableNameType);
-
- return PredefinedExpr::Create(Context, OpLoc, ResultTy,
- PredefinedExpr::UniqueStableNameType, SL,
- Operand);
-}
-
-ExprResult Sema::BuildUniqueStableName(SourceLocation OpLoc,
- Expr *E) {
- QualType ResultTy;
- StringLiteral *SL;
- std::tie(ResultTy, SL) = GetUniqueStableNameInfo(
- Context, E->getType(), OpLoc, PredefinedExpr::UniqueStableNameExpr);
-
- return PredefinedExpr::Create(Context, OpLoc, ResultTy,
- PredefinedExpr::UniqueStableNameExpr, SL, E);
-}
-
-ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
- SourceLocation L, SourceLocation R,
- ParsedType Ty) {
- TypeSourceInfo *TInfo = nullptr;
- QualType T = GetTypeFromParser(Ty, &TInfo);
-
- if (T.isNull())
- return ExprError();
- if (!TInfo)
- TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
-
- return BuildUniqueStableName(OpLoc, TInfo);
-}
-
-ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
- SourceLocation L, SourceLocation R,
- Expr *E) {
- return BuildUniqueStableName(OpLoc, E);
-}
-
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
PredefinedExpr::IdentKind IK;
@@ -3675,7 +3711,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName);
switch (LookupLiteralOperator(UDLScope, R, CookedTy,
/*AllowRaw*/ true, /*AllowTemplate*/ true,
- /*AllowStringTemplate*/ false,
+ /*AllowStringTemplatePack*/ false,
/*DiagnoseMissing*/ !Literal.isImaginary)) {
case LOLR_ErrorNoDiagnostic:
// Lookup failure for imaginary constants isn't fatal, there's still the
@@ -3730,7 +3766,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
return BuildLiteralOperatorCall(R, OpNameInfo, None, TokLoc,
&ExplicitArgs);
}
- case LOLR_StringTemplate:
+ case LOLR_StringTemplatePack:
llvm_unreachable("unexpected literal operator lookup result");
}
}
@@ -3804,8 +3840,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Ty == Context.DoubleTy) {
if (getLangOpts().SinglePrecisionConstants) {
- const BuiltinType *BTy = Ty->getAs<BuiltinType>();
- if (BTy->getKind() != BuiltinType::Float) {
+ if (Ty->castAs<BuiltinType>()->getKind() != BuiltinType::Float) {
Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
}
} else if (getLangOpts().OpenCL &&
@@ -4053,7 +4088,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
bool IsUnevaluatedOperand =
(ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf ||
- ExprKind == UETT_PreferredAlignOf);
+ ExprKind == UETT_PreferredAlignOf || ExprKind == UETT_VecStep);
if (IsUnevaluatedOperand) {
ExprResult Result = CheckUnevaluatedOperand(E);
if (Result.isInvalid())
@@ -4061,6 +4096,16 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
E = Result.get();
}
+ // The operand for sizeof and alignof is in an unevaluated expression context,
+ // so side effects could result in unintended consequences.
+ // Exclude instantiation-dependent expressions, because 'sizeof' is sometimes
+ // used to build SFINAE gadgets.
+ // FIXME: Should we consider instantiation-dependent operands to 'alignof'?
+ if (IsUnevaluatedOperand && !inTemplateInstantiation() &&
+ !E->isInstantiationDependent() &&
+ E->HasSideEffects(Context, false))
+ Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
+
if (ExprKind == UETT_VecStep)
return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(),
E->getSourceRange());
@@ -4097,12 +4142,6 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return true;
}
- // The operand for sizeof and alignof is in an unevaluated expression context,
- // so side effects could result in unintended consequences.
- if (IsUnevaluatedOperand && !inTemplateInstantiation() &&
- E->HasSideEffects(Context, false))
- Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
-
if (CheckObjCTraitOperandConstraints(*this, ExprTy, E->getExprLoc(),
E->getSourceRange(), ExprKind))
return true;
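// Hypothetical inputs (not from this patch) for the relocated warning above:
void demo(int i) {
  (void)sizeof(i++); // warns: 'i++' has a side effect but is never evaluated
}
// Instantiation-dependent operands are exempted, since sizeof is a common
// SFINAE probe; 'step' here is a hypothetical member:
template <typename T> unsigned gadget = sizeof(T{}.step(), 1);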
@@ -4345,7 +4384,6 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::UnaryTransform:
case Type::Attributed:
case Type::SubstTemplateTypeParm:
- case Type::PackExpansion:
case Type::MacroQualified:
// Keep walking after single level desugaring.
T = T.getSingleStepDesugaredType(Context);
@@ -4593,8 +4631,8 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
<< SourceRange(base->getBeginLoc(), rbLoc);
return ExprError();
}
- // If the base is either a MatrixSubscriptExpr or a matrix type, try to create
- // a new MatrixSubscriptExpr.
+ // If the base is a MatrixSubscriptExpr, try to create a new
+ // MatrixSubscriptExpr.
auto *matSubscriptE = dyn_cast<MatrixSubscriptExpr>(base);
if (matSubscriptE) {
if (CheckAndReportCommaError(idx))
@@ -4605,34 +4643,13 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
return CreateBuiltinMatrixSubscriptExpr(
matSubscriptE->getBase(), matSubscriptE->getRowIdx(), idx, rbLoc);
}
- Expr *matrixBase = base;
- bool IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
- if (!IsMSPropertySubscript) {
- ExprResult result = CheckPlaceholderExpr(base);
- if (!result.isInvalid())
- matrixBase = result.get();
- }
- if (matrixBase->getType()->isMatrixType()) {
- if (CheckAndReportCommaError(idx))
- return ExprError();
-
- return CreateBuiltinMatrixSubscriptExpr(matrixBase, idx, nullptr, rbLoc);
- }
-
- // A comma-expression as the index is deprecated in C++2a onwards.
- if (getLangOpts().CPlusPlus20 &&
- ((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
- (isa<CXXOperatorCallExpr>(idx) &&
- cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma))) {
- Diag(idx->getExprLoc(), diag::warn_deprecated_comma_subscript)
- << SourceRange(base->getBeginLoc(), rbLoc);
- }
// Handle any non-overload placeholder types in the base and index
// expressions. We can't handle overloads here because the other
// operand might be an overloadable type, in which case the overload
// resolution for the operator overload should get the first crack
// at the overload.
+ bool IsMSPropertySubscript = false;
if (base->getType()->isNonOverloadPlaceholderType()) {
IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
if (!IsMSPropertySubscript) {
@@ -4642,6 +4659,24 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
base = result.get();
}
}
+
+ // If the base is a matrix type, try to create a new MatrixSubscriptExpr.
+ if (base->getType()->isMatrixType()) {
+ if (CheckAndReportCommaError(idx))
+ return ExprError();
+
+ return CreateBuiltinMatrixSubscriptExpr(base, idx, nullptr, rbLoc);
+ }
+
+ // A comma-expression as the index is deprecated in C++2a onwards.
+ if (getLangOpts().CPlusPlus20 &&
+ ((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
+ (isa<CXXOperatorCallExpr>(idx) &&
+ cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma))) {
+ Diag(idx->getExprLoc(), diag::warn_deprecated_comma_subscript)
+ << SourceRange(base->getBeginLoc(), rbLoc);
+ }
+
if (idx->getType()->isNonOverloadPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(idx);
if (result.isInvalid()) return ExprError();
@@ -4741,12 +4776,13 @@ ExprResult Sema::CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
return nullptr;
}
- llvm::APSInt Idx;
- if (IndexExpr->isIntegerConstantExpr(Idx, Context) &&
- (Idx < 0 || Idx >= Dim)) {
- Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_outside_range)
- << IsColumnIdx << Dim;
- return nullptr;
+ if (Optional<llvm::APSInt> Idx =
+ IndexExpr->getIntegerConstantExpr(Context)) {
+ if ((*Idx < 0 || *Idx >= Dim)) {
+ Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_outside_range)
+ << IsColumnIdx << Dim;
+ return nullptr;
+ }
}
ExprResult ConvExpr =
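// Hypothetical use (requires -fenable-matrix) of the bounds check above:
using m3x2 = float __attribute__((matrix_type(3, 2)));
float at(m3x2 m) { return m[2][1]; } // row 2, column 1: both in range
// 'm[3][0]' would hit err_matrix_index_outside_range for the row index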
@@ -4780,6 +4816,9 @@ void Sema::CheckAddressOfNoDeref(const Expr *E) {
}
void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) {
+ if (isUnevaluatedContext())
+ return;
+
QualType ResultTy = E->getType();
ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back();
@@ -5162,12 +5201,11 @@ ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
IsCorrect = false;
continue;
}
- llvm::APSInt Result;
- bool IsConstant = Step->isIntegerConstantExpr(Result, Context);
+ Optional<llvm::APSInt> Result = Step->getIntegerConstantExpr(Context);
// OpenMP 5.0, 2.1.6 Iterators, Restrictions
// If the step expression of a range-specification equals zero, the
// behavior is unspecified.
- if (IsConstant && Result.isNullValue()) {
+ if (Result && Result->isNullValue()) {
Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero)
<< Step << Step->getSourceRange();
IsCorrect = false;
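// A hypothetical OpenMP 5.0 iterator (compile with -fopenmp) rejected by the
// zero-step check above:
void enqueue(int *buf) {
#pragma omp task depend(iterator(i = 0 : 8 : 0), in : buf[i]) // step == 0
  {
  }
}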
@@ -5567,9 +5605,8 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
return true;
}
- Diag(CallLoc,
- diag::err_use_of_default_argument_to_function_declared_later) <<
- FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
+ Diag(CallLoc, diag::err_use_of_default_argument_to_function_declared_later)
+ << FD << cast<CXXRecordDecl>(FD->getDeclContext());
Diag(UnparsedDefaultArgLocs[Param],
diag::note_default_argument_declared_here);
return true;
@@ -6021,6 +6058,9 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
#include "clang/AST/BuiltinTypes.def"
@@ -6069,8 +6109,6 @@ static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) {
ExprResult result = S.CheckPlaceholderExpr(args[i]);
if (result.isInvalid()) hasInvalid = true;
else args[i] = result.get();
- } else if (hasInvalid) {
- (void)S.CorrectDelayedTyposInExpr(args[i]);
}
}
return hasInvalid;
@@ -6158,6 +6196,7 @@ static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
Params.push_back(Parm);
}
OverloadDecl->setParams(Params);
+ Sema->mergeDeclAttributes(OverloadDecl, FDecl);
return OverloadDecl;
}
@@ -6273,7 +6312,8 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig) {
ExprResult Call =
- BuildCallExpr(Scope, Fn, LParenLoc, ArgExprs, RParenLoc, ExecConfig);
+ BuildCallExpr(Scope, Fn, LParenLoc, ArgExprs, RParenLoc, ExecConfig,
+ /*IsExecConfig=*/false, /*AllowRecovery=*/true);
if (Call.isInvalid())
return Call;
@@ -6301,7 +6341,8 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
/// locations.
ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
- Expr *ExecConfig, bool IsExecConfig) {
+ Expr *ExecConfig, bool IsExecConfig,
+ bool AllowRecovery) {
// Since this might be a postfix expression, get rid of ParenListExprs.
ExprResult Result = MaybeConvertParenListExprToParenExpr(Scope, Fn);
if (Result.isInvalid()) return ExprError();
@@ -6322,7 +6363,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
}
return CallExpr::Create(Context, Fn, /*Args=*/{}, Context.VoidTy,
- VK_RValue, RParenLoc);
+ VK_RValue, RParenLoc, CurFPFeatureOverrides());
}
if (Fn->getType() == Context.PseudoObjectTy) {
ExprResult result = CheckPlaceholderExpr(Fn);
@@ -6336,7 +6377,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (ExecConfig) {
return CUDAKernelCallExpr::Create(
Context, Fn, cast<CallExpr>(ExecConfig), ArgExprs,
- Context.DependentTy, VK_RValue, RParenLoc);
+ Context.DependentTy, VK_RValue, RParenLoc, CurFPFeatureOverrides());
} else {
tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs(
@@ -6344,7 +6385,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
Fn->getBeginLoc());
return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
- VK_RValue, RParenLoc);
+ VK_RValue, RParenLoc, CurFPFeatureOverrides());
}
}
@@ -6361,7 +6402,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (Fn->getType() == Context.BoundMemberTy) {
return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs,
- RParenLoc);
+ RParenLoc, AllowRecovery);
}
}
@@ -6373,14 +6414,14 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (!find.HasFormOfMemberPointer) {
if (Expr::hasAnyTypeDependentArguments(ArgExprs))
return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
- VK_RValue, RParenLoc);
+ VK_RValue, RParenLoc, CurFPFeatureOverrides());
OverloadExpr *ovl = find.Expression;
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(ovl))
return BuildOverloadedCallExpr(
Scope, Fn, ULE, LParenLoc, ArgExprs, RParenLoc, ExecConfig,
/*AllowTypoCorrection=*/true, find.IsAddressOfOperand);
return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs,
- RParenLoc);
+ RParenLoc, AllowRecovery);
}
}
@@ -6433,6 +6474,21 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
checkDirectCallValidity(*this, Fn, FD, ArgExprs);
}
+ if (Context.isDependenceAllowed() &&
+ (Fn->isTypeDependent() || Expr::hasAnyTypeDependentArguments(ArgExprs))) {
+ assert(!getLangOpts().CPlusPlus);
+ assert((Fn->containsErrors() ||
+ llvm::any_of(ArgExprs,
+ [](clang::Expr *E) { return E->containsErrors(); })) &&
+ "should only occur in error-recovery path.");
+ QualType ReturnType =
+ llvm::isa_and_nonnull<FunctionDecl>(NDecl)
+ ? cast<FunctionDecl>(NDecl)->getCallResultType()
+ : Context.DependentTy;
+ return CallExpr::Create(Context, Fn, ArgExprs, ReturnType,
+ Expr::getValueKindForType(ReturnType), RParenLoc,
+ CurFPFeatureOverrides());
+ }
return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs, RParenLoc,
ExecConfig, IsExecConfig);
}
@@ -6564,15 +6620,16 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (Config) {
assert(UsesADL == ADLCallKind::NotADL &&
"CUDAKernelCallExpr should not use ADL");
- TheCall =
- CUDAKernelCallExpr::Create(Context, Fn, cast<CallExpr>(Config), Args,
- ResultTy, VK_RValue, RParenLoc, NumParams);
+ TheCall = CUDAKernelCallExpr::Create(Context, Fn, cast<CallExpr>(Config),
+ Args, ResultTy, VK_RValue, RParenLoc,
+ CurFPFeatureOverrides(), NumParams);
} else {
- TheCall = CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue,
- RParenLoc, NumParams, UsesADL);
+ TheCall =
+ CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue, RParenLoc,
+ CurFPFeatureOverrides(), NumParams, UsesADL);
}
- if (!getLangOpts().CPlusPlus) {
+ if (!Context.isDependenceAllowed()) {
// Forget about the nulled arguments since typo correction
// does not handle them well.
TheCall->shrinkNumArgs(Args.size());
@@ -6596,10 +6653,11 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (Config)
TheCall = CUDAKernelCallExpr::Create(
Context, Fn, cast<CallExpr>(Config), Args, ResultTy, VK_RValue,
- RParenLoc, NumParams);
+ RParenLoc, CurFPFeatureOverrides(), NumParams);
else
- TheCall = CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue,
- RParenLoc, NumParams, UsesADL);
+ TheCall =
+ CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue, RParenLoc,
+ CurFPFeatureOverrides(), NumParams, UsesADL);
}
// We can now handle the nulled arguments for the default arguments.
TheCall->setNumArgsUnsafe(std::max<unsigned>(Args.size(), NumParams));
@@ -6960,9 +7018,9 @@ void Sema::maybeExtendBlockObject(ExprResult &E) {
// Only do this in an r-value context.
if (!getLangOpts().ObjCAutoRefCount) return;
- E = ImplicitCastExpr::Create(Context, E.get()->getType(),
- CK_ARCExtendBlockObject, E.get(),
- /*base path*/ nullptr, VK_RValue);
+ E = ImplicitCastExpr::Create(
+ Context, E.get()->getType(), CK_ARCExtendBlockObject, E.get(),
+ /*base path*/ nullptr, VK_RValue, FPOptionsOverride());
Cleanup.setExprNeedsCleanups(true);
}
@@ -7041,6 +7099,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_Integral:
return CK_FixedPointToIntegral;
case Type::STK_Floating:
+ return CK_FixedPointToFloating;
case Type::STK_IntegralComplex:
case Type::STK_FloatingComplex:
Diag(Src.get()->getExprLoc(),
@@ -7113,10 +7172,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
case Type::STK_FixedPoint:
- Diag(Src.get()->getExprLoc(),
- diag::err_unimplemented_conversion_with_fixed_point_type)
- << SrcTy;
- return CK_IntegralCast;
+ return CK_FloatingToFixedPoint;
}
llvm_unreachable("Should have returned before this");
@@ -7211,6 +7267,28 @@ static bool breakDownVectorType(QualType type, uint64_t &len,
return true;
}
+/// Are the two types SVE-bitcast-compatible types? I.e. is bitcasting from the
+/// first SVE type (e.g. an SVE VLAT) to the second type (e.g. an SVE VLST)
+/// allowed?
+///
+/// This will also return false if the two given types do not make sense from
+/// the perspective of SVE bitcasts.
+bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
+ assert(srcTy->isVectorType() || destTy->isVectorType());
+
+ auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
+ if (!FirstType->isSizelessBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ return VecTy &&
+ VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector;
+ };
+
+ return ValidScalableConversion(srcTy, destTy) ||
+ ValidScalableConversion(destTy, srcTy);
+}
+
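// A sketch of a conversion isValidSveBitcast is meant to admit, assuming a
// target with SVE and -msve-vector-bits=512 so that VLSTs exist:
#include <arm_sve.h>
typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(512)));
fixed_i32 to_fixed(svint32_t v) { return (fixed_i32)v; }    // VLAT -> VLST
svint32_t to_sizeless(fixed_i32 v) { return (svint32_t)v; } // VLST -> VLAT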
/// Are the two types lax-compatible vector types? That is, given
/// that one of them is a vector, do they have equal storage sizes,
/// where the storage size is the number of elements times the element
@@ -7401,7 +7479,7 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
}
if (PE || PLE->getNumExprs() == 1) {
Expr *E = (PE ? PE->getSubExpr() : PLE->getExpr(0));
- if (!E->getType()->isVectorType())
+ if (!E->isTypeDependent() && !E->getType()->isVectorType())
isVectorLiteral = true;
}
else
@@ -8059,6 +8137,16 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
VK = VK_RValue;
OK = OK_Ordinary;
+ if (Context.isDependenceAllowed() &&
+ (Cond.get()->isTypeDependent() || LHS.get()->isTypeDependent() ||
+ RHS.get()->isTypeDependent())) {
+ assert(!getLangOpts().CPlusPlus);
+ assert((Cond.get()->containsErrors() || LHS.get()->containsErrors() ||
+ RHS.get()->containsErrors()) &&
+ "should only occur in error-recovery path.");
+ return Context.DependentTy;
+ }
+
// The OpenCL operator with a vector condition is sufficiently
// different to merit its own checker.
if ((getLangOpts().OpenCL && Cond.get()->getType()->isVectorType()) ||
@@ -8106,6 +8194,15 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// If both operands have arithmetic type, do the usual arithmetic conversions
// to find a common type: C99 6.5.15p3,5.
if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
+ // Disallow invalid arithmetic conversions, such as those between ExtInts of
+ // different sizes, or between ExtInts and other types.
+ if (ResTy.isNull() && (LHSTy->isExtIntType() || RHSTy->isExtIntType())) {
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy));
RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy));
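// Hypothetical _ExtInt operands (C mode) for the new check above: the usual
// arithmetic conversions do not unify distinct _ExtInt widths.
_ExtInt(12) pick(int flag, _ExtInt(12) a, _ExtInt(12) b) {
  return flag ? a : b; // OK: both sides have the same type
}
// 'flag ? a : c' with 'c' of type _ExtInt(33) would be diagnosed with
// err_typecheck_cond_incompatible_operands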
@@ -8363,7 +8460,7 @@ static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode,
Expr **RHSExprs) {
// Don't strip parenthesis: we should not warn if E is in parenthesis.
E = E->IgnoreImpCasts();
- E = E->IgnoreConversionOperator();
+ E = E->IgnoreConversionOperatorSingleStep();
E = E->IgnoreImpCasts();
if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) {
E = MTE->getSubExpr();
@@ -8470,8 +8567,12 @@ static QualType computeConditionalNullability(QualType ResTy, bool IsBin,
auto GetNullability = [&Ctx](QualType Ty) {
Optional<NullabilityKind> Kind = Ty->getNullability(Ctx);
- if (Kind)
+ if (Kind) {
+ // For our purposes, treat _Nullable_result as _Nullable.
+ if (*Kind == NullabilityKind::NullableResult)
+ return NullabilityKind::Nullable;
return *Kind;
+ }
return NullabilityKind::Unspecified;
};
@@ -8516,7 +8617,7 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr) {
- if (!getLangOpts().CPlusPlus) {
+ if (!Context.isDependenceAllowed()) {
// C cannot handle TypoExpr nodes in the condition because it
// doesn't handle dependent types properly, so make sure any TypoExprs have
// been dealt with before checking the operands.
@@ -8997,6 +9098,15 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
}
}
+ // Allow assignments between fixed-length and sizeless SVE vectors.
+ if ((LHSType->isSizelessBuiltinType() && RHSType->isVectorType()) ||
+ (LHSType->isVectorType() && RHSType->isSizelessBuiltinType()))
+ if (Context.areCompatibleSveTypes(LHSType, RHSType) ||
+ Context.areLaxCompatibleSveTypes(LHSType, RHSType)) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
return Incompatible;
}
@@ -9781,6 +9891,10 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
const VectorType *RHSVecType = RHSType->getAs<VectorType>();
assert(LHSVecType || RHSVecType);
+ if ((LHSVecType && LHSVecType->getElementType()->isBFloat16Type()) ||
+ (RHSVecType && RHSVecType->getElementType()->isBFloat16Type()))
+ return InvalidOperands(Loc, LHS, RHS);
+
// AltiVec-style "vector bool op vector bool" combinations are allowed
// for some operators but not others.
if (!AllowBothBool &&
@@ -9827,6 +9941,44 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
}
}
+ // Expressions containing fixed-length and sizeless SVE vectors are invalid
+ // since the ambiguity can affect the ABI.
+ auto IsSveConversion = [](QualType FirstType, QualType SecondType) {
+ const VectorType *VecType = SecondType->getAs<VectorType>();
+ return FirstType->isSizelessBuiltinType() && VecType &&
+ (VecType->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ VecType->getVectorKind() ==
+ VectorType::SveFixedLengthPredicateVector);
+ };
+
+ if (IsSveConversion(LHSType, RHSType) || IsSveConversion(RHSType, LHSType)) {
+ Diag(Loc, diag::err_typecheck_sve_ambiguous) << LHSType << RHSType;
+ return QualType();
+ }
+
+ // Expressions containing GNU and SVE (fixed or sizeless) vectors are invalid
+ // since the ambiguity can affect the ABI.
+ auto IsSveGnuConversion = [](QualType FirstType, QualType SecondType) {
+ const VectorType *FirstVecType = FirstType->getAs<VectorType>();
+ const VectorType *SecondVecType = SecondType->getAs<VectorType>();
+
+ if (FirstVecType && SecondVecType)
+ return FirstVecType->getVectorKind() == VectorType::GenericVector &&
+ (SecondVecType->getVectorKind() ==
+ VectorType::SveFixedLengthDataVector ||
+ SecondVecType->getVectorKind() ==
+ VectorType::SveFixedLengthPredicateVector);
+
+ return FirstType->isSizelessBuiltinType() && SecondVecType &&
+ SecondVecType->getVectorKind() == VectorType::GenericVector;
+ };
+
+ if (IsSveGnuConversion(LHSType, RHSType) ||
+ IsSveGnuConversion(RHSType, LHSType)) {
+ Diag(Loc, diag::err_typecheck_sve_gnu_ambiguous) << LHSType << RHSType;
+ return QualType();
+ }
+
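// A hypothetical mix rejected above, assuming -msve-vector-bits=256 (both
// operands are 32 bytes, but the vector kinds differ in ABI terms):
#include <arm_sve.h>
typedef int gnu_i32 __attribute__((vector_size(32)));
svint32_t add(svint32_t a, gnu_i32 b) {
  return a + b; // err_typecheck_sve_gnu_ambiguous
}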
// If there's a vector type and a scalar, try to convert the scalar to
// the vector element type and splat.
unsigned DiagID = diag::err_typecheck_vector_not_convertable;
@@ -9981,7 +10133,7 @@ static void DiagnoseDivisionSizeofPointerOrArray(Sema &S, Expr *LHS, Expr *RHS,
QualType RHSTy;
if (RUE->isArgumentType())
- RHSTy = RUE->getArgumentType();
+ RHSTy = RUE->getArgumentType().getNonReferenceType();
else
RHSTy = RUE->getArgumentExpr()->IgnoreParens()->getType();
@@ -9999,7 +10151,7 @@ static void DiagnoseDivisionSizeofPointerOrArray(Sema &S, Expr *LHS, Expr *RHS,
QualType ArrayElemTy = ArrayTy->getElementType();
if (ArrayElemTy != S.Context.getBaseElementType(ArrayTy) ||
ArrayElemTy->isDependentType() || RHSTy->isDependentType() ||
- ArrayElemTy->isCharType() ||
+ RHSTy->isReferenceType() || ArrayElemTy->isCharType() ||
S.Context.getTypeSize(ArrayElemTy) == S.Context.getTypeSize(RHSTy))
return;
S.Diag(Loc, diag::warn_division_sizeof_array)
@@ -10589,9 +10741,13 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
}
QualType LHSExprType = LHS.get()->getType();
- uint64_t LeftSize = LHSExprType->isExtIntType()
- ? S.Context.getIntWidth(LHSExprType)
- : S.Context.getTypeSize(LHSExprType);
+ uint64_t LeftSize = S.Context.getTypeSize(LHSExprType);
+ if (LHSExprType->isExtIntType())
+ LeftSize = S.Context.getIntWidth(LHSExprType);
+ else if (LHSExprType->isFixedPointType()) {
+ auto FXSema = S.Context.getFixedPointSemantics(LHSExprType);
+ LeftSize = FXSema.getWidth() - (unsigned)FXSema.hasUnsignedPadding();
+ }
llvm::APInt LeftBits(Right.getBitWidth(), LeftSize);
if (Right.uge(LeftBits)) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
@@ -10600,7 +10756,8 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
return;
}
- if (Opc != BO_Shl)
+ // FIXME: We probably need to handle fixed point types specially here.
+ if (Opc != BO_Shl || LHSExprType->isFixedPointType())
return;
// When left shifting an ICE which is signed, we can check for overflow which
@@ -10784,7 +10941,9 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
QualType RHSType = RHS.get()->getType();
// C99 6.5.7p2: Each of the operands shall have integer type.
- if (!LHSType->hasIntegerRepresentation() ||
+ // Embedded-C 4.1.6.2.2: The LHS may also be fixed-point.
+ if ((!LHSType->isFixedPointOrIntegerType() &&
+ !LHSType->hasIntegerRepresentation()) ||
!RHSType->hasIntegerRepresentation())
return InvalidOperands(Loc, LHS, RHS);
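// Hypothetical Embedded-C shifts (compile with -ffixed-point) for the checks
// above: the LHS may be fixed-point, and the shift count is measured against
// the fixed-point width computed earlier.
_Accum twice(_Accum a) { return a << 1; } // OK: fixed-point LHS, integer RHS
// 'a << 40' would warn: count >= width of the fixed-point type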
@@ -13632,7 +13791,7 @@ static std::pair<ExprResult, ExprResult>
CorrectDelayedTyposInBinOp(Sema &S, BinaryOperatorKind Opc, Expr *LHSExpr,
Expr *RHSExpr) {
ExprResult LHS = LHSExpr, RHS = RHSExpr;
- if (!S.getLangOpts().CPlusPlus) {
+ if (!S.Context.isDependenceAllowed()) {
// C cannot handle TypoExpr nodes on either side of a binop because it
// doesn't handle dependent types properly, so make sure any TypoExprs have
// been dealt with before checking the operands.
@@ -13889,9 +14048,10 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
// float vectors and truncating the result back to half vector. For now, we do
// this only when HalfArgsAndReturn is set (that is, when the target is arm or
// arm64).
- assert(isVector(RHS.get()->getType(), Context.HalfTy) ==
- isVector(LHS.get()->getType(), Context.HalfTy) &&
- "both sides are half vectors or neither sides are");
+ assert(
+ (Opc == BO_Comma || isVector(RHS.get()->getType(), Context.HalfTy) ==
+ isVector(LHS.get()->getType(), Context.HalfTy)) &&
+ "both sides are half vectors or neither sides are");
ConvertHalfVec =
needsConversionOfHalfVec(ConvertHalfVec, Context, LHS.get(), RHS.get());
@@ -14169,6 +14329,19 @@ ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
return BuildBinOp(S, TokLoc, Opc, LHSExpr, RHSExpr);
}
+void Sema::LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
+ UnresolvedSetImpl &Functions) {
+ OverloadedOperatorKind OverOp = BinaryOperator::getOverloadedOperator(Opc);
+ if (OverOp != OO_None && OverOp != OO_Equal)
+ LookupOverloadedOperatorName(OverOp, S, Functions);
+
+ // In C++20 onwards, we may have a second operator to look up.
+ if (getLangOpts().CPlusPlus20) {
+ if (OverloadedOperatorKind ExtraOp = getRewrittenOverloadedOperator(OverOp))
+ LookupOverloadedOperatorName(ExtraOp, S, Functions);
+ }
+}
+
/// Build an overloaded binary operator expression in the given scope.
static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
BinaryOperatorKind Opc,
@@ -14188,23 +14361,9 @@ static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
break;
}
- // Find all of the overloaded operators visible from this
- // point. We perform both an operator-name lookup from the local
- // scope and an argument-dependent lookup based on the types of
- // the arguments.
+ // Find all of the overloaded operators visible from this point.
UnresolvedSet<16> Functions;
- OverloadedOperatorKind OverOp
- = BinaryOperator::getOverloadedOperator(Opc);
- if (Sc && OverOp != OO_None && OverOp != OO_Equal)
- S.LookupOverloadedOperatorName(OverOp, Sc, LHS->getType(),
- RHS->getType(), Functions);
-
- // In C++20 onwards, we may have a second operator to look up.
- if (S.getLangOpts().CPlusPlus20) {
- if (OverloadedOperatorKind ExtraOp = getRewrittenOverloadedOperator(OverOp))
- S.LookupOverloadedOperatorName(ExtraOp, Sc, LHS->getType(),
- RHS->getType(), Functions);
- }
+ S.LookupBinOp(Sc, OpLoc, Opc, Functions);
// Build the (potentially-overloaded, potentially-dependent)
// binary operation.
@@ -14313,6 +14472,47 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
}
+ if (getLangOpts().RecoveryAST &&
+ (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())) {
+ assert(!getLangOpts().CPlusPlus);
+ assert((LHSExpr->containsErrors() || RHSExpr->containsErrors()) &&
+ "Should only occur in error-recovery path.");
+ if (BinaryOperator::isCompoundAssignmentOp(Opc))
+ // C [6.5.16] p3:
+ // An assignment expression has the value of the left operand after the
+ // assignment, but is not an lvalue.
+ return CompoundAssignOperator::Create(
+ Context, LHSExpr, RHSExpr, Opc,
+ LHSExpr->getType().getUnqualifiedType(), VK_RValue, OK_Ordinary,
+ OpLoc, CurFPFeatureOverrides());
+ QualType ResultType;
+ switch (Opc) {
+ case BO_Assign:
+ ResultType = LHSExpr->getType().getUnqualifiedType();
+ break;
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_LAnd:
+ case BO_LOr:
+ // These operators have a fixed result type regardless of operands.
+ ResultType = Context.IntTy;
+ break;
+ case BO_Comma:
+ ResultType = RHSExpr->getType();
+ break;
+ default:
+ ResultType = Context.DependentTy;
+ break;
+ }
+ return BinaryOperator::Create(Context, LHSExpr, RHSExpr, Opc, ResultType,
+ VK_RValue, OK_Ordinary, OpLoc,
+ CurFPFeatureOverrides());
+ }
+
// Build a built-in binary operation.
return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
}
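// A hedged illustration of the result types chosen above for broken C code
// (with RecoveryAST enabled; 'undeclared' contains errors):
//   undeclared < 3    -> BinaryOperator of type 'int'  (fixed-type rows)
//   x = undeclared    -> unqualified type of 'x'       (BO_Assign row)
//   (undeclared, 42)  -> type of the RHS, here 'int'   (BO_Comma row)
//   undeclared * 3    -> dependent                     (default row)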
@@ -14540,7 +14740,8 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
OpLoc, CanOverflow, CurFPFeatureOverrides());
if (Opc == UO_Deref && UO->getType()->hasAttr(attr::NoDeref) &&
- !isa<ArrayType>(UO->getType().getDesugaredType(Context)))
+ !isa<ArrayType>(UO->getType().getDesugaredType(Context)) &&
+ !isUnevaluatedContext())
ExprEvalContexts.back().PossibleDerefs.insert(UO);
// Convert the result back to a half vector.
@@ -14620,15 +14821,11 @@ ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
if (getLangOpts().CPlusPlus && Input->getType()->isOverloadableType() &&
UnaryOperator::getOverloadedOperator(Opc) != OO_None &&
!(Opc == UO_AddrOf && isQualifiedMemberAccess(Input))) {
- // Find all of the overloaded operators visible from this
- // point. We perform both an operator-name lookup from the local
- // scope and an argument-dependent lookup based on the types of
- // the arguments.
+ // Find all of the overloaded operators visible from this point.
UnresolvedSet<16> Functions;
OverloadedOperatorKind OverOp = UnaryOperator::getOverloadedOperator(Opc);
if (S && OverOp != OO_None)
- LookupOverloadedOperatorName(OverOp, S, Input->getType(), QualType(),
- Functions);
+ LookupOverloadedOperatorName(OverOp, S, Functions);
return CreateOverloadedUnaryOp(OpLoc, Opc, Functions, Input);
}
@@ -14935,9 +15132,8 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
} else {
// The conditional expression is required to be a constant expression.
llvm::APSInt condEval(32);
- ExprResult CondICE
- = VerifyIntegerConstantExpression(CondExpr, &condEval,
- diag::err_typecheck_choose_expr_requires_constant, false);
+ ExprResult CondICE = VerifyIntegerConstantExpression(
+ CondExpr, &condEval, diag::err_typecheck_choose_expr_requires_constant);
if (CondICE.isInvalid())
return ExprError();
CondExpr = CondICE.get();
@@ -14993,7 +15189,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope) {
assert(ParamInfo.getIdentifier() == nullptr &&
"block-id should have no identifier!");
- assert(ParamInfo.getContext() == DeclaratorContext::BlockLiteralContext);
+ assert(ParamInfo.getContext() == DeclaratorContext::BlockLiteral);
BlockScopeInfo *CurBlock = getCurBlock();
TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
@@ -15041,10 +15237,10 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
CurBlock->TheDecl->setSignatureAsWritten(Sig);
CurBlock->FunctionType = T;
- const FunctionType *Fn = T->getAs<FunctionType>();
+ const auto *Fn = T->castAs<FunctionType>();
QualType RetTy = Fn->getReturnType();
bool isVariadic =
- (isa<FunctionProtoType>(Fn) && cast<FunctionProtoType>(Fn)->isVariadic());
+ (isa<FunctionProtoType>(Fn) && cast<FunctionProtoType>(Fn)->isVariadic());
CurBlock->TheDecl->setIsVariadic(isVariadic);
@@ -15207,10 +15403,6 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
PopDeclContext();
- // Pop the block scope now but keep it alive to the end of this function.
- AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
- PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(&WP, BD, BlockTy);
-
// Set the captured variables on the block.
SmallVector<BlockDecl::Capture, 4> Captures;
for (Capture &Cap : BSI->Captures) {
@@ -15278,6 +15470,10 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
}
BD->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
+ // Pop the block scope now but keep it alive to the end of this function.
+ AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
+ PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(&WP, BD, BlockTy);
+
BlockExpr *Result = new (Context) BlockExpr(BD, BlockTy);
// If the block isn't obviously global, i.e. it captures anything at
@@ -15543,7 +15739,6 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
bool CheckInferredResultType = false;
bool isInvalid = false;
unsigned DiagKind = 0;
- FixItHint Hint;
ConversionFixItGenerator ConvHints;
bool MayHaveConvFixit = false;
bool MayHaveFunctionDiff = false;
@@ -15596,10 +15791,9 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
}
CheckInferredResultType = DstType->isObjCObjectPointerType() &&
SrcType->isObjCObjectPointerType();
- if (Hint.isNull() && !CheckInferredResultType) {
+ if (!CheckInferredResultType) {
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
- }
- else if (CheckInferredResultType) {
+ } else if (CheckInferredResultType) {
SrcType = SrcType.getUnqualifiedType();
DstType = DstType.getUnqualifiedType();
}
@@ -15767,14 +15961,22 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
else
FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange();
+ if (DiagKind == diag::ext_typecheck_convert_incompatible_pointer_sign ||
+ DiagKind == diag::err_typecheck_convert_incompatible_pointer_sign) {
+ auto isPlainChar = [](const clang::Type *Type) {
+ return Type->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ Type->isSpecificBuiltinType(BuiltinType::Char_U);
+ };
+ FDiag << (isPlainChar(FirstType->getPointeeOrArrayElementType()) ||
+ isPlainChar(SecondType->getPointeeOrArrayElementType()));
+ }
+
// If we can fix the conversion, suggest the FixIts.
- assert(ConvHints.isNull() || Hint.isNull());
if (!ConvHints.isNull()) {
for (FixItHint &H : ConvHints.Hints)
FDiag << H;
- } else {
- FDiag << Hint;
}
+
if (MayHaveConvFixit) { FDiag << (unsigned) (ConvHints.Kind); }
if (MayHaveFunctionDiff)
@@ -15803,21 +16005,27 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
}
ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
- llvm::APSInt *Result) {
+ llvm::APSInt *Result,
+ AllowFoldKind CanFold) {
class SimpleICEDiagnoser : public VerifyICEDiagnoser {
public:
- void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
- S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus << SR;
+ SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_ice_not_integral)
+ << T << S.LangOpts.CPlusPlus;
+ }
+ SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) override {
+ return S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus;
}
} Diagnoser;
- return VerifyIntegerConstantExpression(E, Result, Diagnoser);
+ return VerifyIntegerConstantExpression(E, Result, Diagnoser, CanFold);
}
ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result,
unsigned DiagID,
- bool AllowFold) {
+ AllowFoldKind CanFold) {
class IDDiagnoser : public VerifyICEDiagnoser {
unsigned DiagID;
@@ -15825,23 +16033,29 @@ ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
IDDiagnoser(unsigned DiagID)
: VerifyICEDiagnoser(DiagID == 0), DiagID(DiagID) { }
- void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
- S.Diag(Loc, DiagID) << SR;
+ SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) override {
+ return S.Diag(Loc, DiagID);
}
} Diagnoser(DiagID);
- return VerifyIntegerConstantExpression(E, Result, Diagnoser, AllowFold);
+ return VerifyIntegerConstantExpression(E, Result, Diagnoser, CanFold);
+}
+
+Sema::SemaDiagnosticBuilder
+Sema::VerifyICEDiagnoser::diagnoseNotICEType(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return diagnoseNotICE(S, Loc);
}
-void Sema::VerifyICEDiagnoser::diagnoseFold(Sema &S, SourceLocation Loc,
- SourceRange SR) {
- S.Diag(Loc, diag::ext_expr_not_ice) << SR << S.LangOpts.CPlusPlus;
+Sema::SemaDiagnosticBuilder
+Sema::VerifyICEDiagnoser::diagnoseFold(Sema &S, SourceLocation Loc) {
+ return S.Diag(Loc, diag::ext_expr_not_ice) << S.LangOpts.CPlusPlus;
}
ExprResult
Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
- bool AllowFold) {
+ AllowFoldKind CanFold) {
SourceLocation DiagLoc = E->getBeginLoc();
if (getLangOpts().CPlusPlus11) {
@@ -15852,14 +16066,16 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
// unscoped enumeration type
ExprResult Converted;
class CXX11ConvertDiagnoser : public ICEConvertDiagnoser {
+ VerifyICEDiagnoser &BaseDiagnoser;
public:
- CXX11ConvertDiagnoser(bool Silent)
- : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false,
- Silent, true) {}
+ CXX11ConvertDiagnoser(VerifyICEDiagnoser &BaseDiagnoser)
+ : ICEConvertDiagnoser(/*AllowScopedEnumerations*/ false,
+ BaseDiagnoser.Suppress, true),
+ BaseDiagnoser(BaseDiagnoser) {}
SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
QualType T) override {
- return S.Diag(Loc, diag::err_ice_not_integral) << T;
+ return BaseDiagnoser.diagnoseNotICEType(S, Loc, T);
}
SemaDiagnosticBuilder diagnoseIncomplete(
@@ -15893,7 +16109,7 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
llvm_unreachable("conversion functions are permitted");
}
- } ConvertDiagnoser(Diagnoser.Suppress);
+ } ConvertDiagnoser(Diagnoser);
Converted = PerformContextualImplicitConversion(DiagLoc, E,
ConvertDiagnoser);
@@ -15905,7 +16121,8 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
} else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
// An ICE must be of integral or unscoped enumeration type.
if (!Diagnoser.Suppress)
- Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange());
+ Diagnoser.diagnoseNotICEType(*this, DiagLoc, E->getType())
+ << E->getSourceRange();
return ExprError();
}
@@ -15956,9 +16173,9 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
Notes.clear();
}
- if (!Folded || !AllowFold) {
+ if (!Folded || !CanFold) {
if (!Diagnoser.Suppress) {
- Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange());
+ Diagnoser.diagnoseNotICE(*this, DiagLoc) << E->getSourceRange();
for (const PartialDiagnosticAt &Note : Notes)
Diag(Note.first, Note.second);
}
@@ -15966,7 +16183,7 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
}
- Diagnoser.diagnoseFold(*this, DiagLoc, E->getSourceRange());
+ Diagnoser.diagnoseFold(*this, DiagLoc) << E->getSourceRange();
for (const PartialDiagnosticAt &Note : Notes)
Diag(Note.first, Note.second);
@@ -16148,8 +16365,8 @@ static void EvaluateAndDiagnoseImmediateInvocation(
Expr::EvalResult Eval;
Eval.Diag = &Notes;
ConstantExpr *CE = Candidate.getPointer();
- bool Result = CE->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen,
- SemaRef.getASTContext(), true);
+ bool Result = CE->EvaluateAsConstantExpr(
+ Eval, SemaRef.getASTContext(), ConstantExprKind::ImmediateInvocation);
if (!Result || !Notes.empty()) {
Expr *InnerExpr = CE->getSubExpr()->IgnoreImplicit();
if (auto *FunctionalCast = dyn_cast<CXXFunctionalCastExpr>(InnerExpr))
@@ -16532,8 +16749,13 @@ static OdrUseContext isOdrUseContext(Sema &SemaRef) {
}
static bool isImplicitlyDefinableConstexprFunction(FunctionDecl *Func) {
- return Func->isConstexpr() &&
- (Func->isImplicitlyInstantiable() || !Func->isUserProvided());
+ if (!Func->isConstexpr())
+ return false;
+
+ if (Func->isImplicitlyInstantiable() || !Func->isUserProvided())
+ return true;
+ auto *CCD = dyn_cast<CXXConstructorDecl>(Func);
+ return CCD && CCD->getInheritedConstructor();
}
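// A hypothetical case the inherited-constructor check above admits: B(int)
// is user-provided, yet D's inherited copy of it must still be implicitly
// definable as constexpr.
struct B { constexpr B(int) {} };
struct D : B { using B::B; };
constexpr D d(0); // forces the inherited constructor to be defined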
/// Mark a function referenced, and check whether it is odr-used
@@ -16686,7 +16908,11 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool FirstInstantiation = PointOfInstantiation.isInvalid();
if (FirstInstantiation) {
PointOfInstantiation = Loc;
- Func->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ if (auto *MSI = Func->getMemberSpecializationInfo())
+ MSI->setPointOfInstantiation(Loc);
+ // FIXME: Notify listener.
+ else
+ Func->setTemplateSpecializationKind(TSK, PointOfInstantiation);
} else if (TSK != TSK_ImplicitInstantiation) {
// Use the point of use as the point of instantiation, instead of the
// point of explicit instantiation (which we track as the actual point
@@ -16925,8 +17151,7 @@ static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var,
if (Var->getType()->isVariablyModifiedType() && IsBlock) {
if (Diagnose) {
S.Diag(Loc, diag::err_ref_vm_type);
- S.Diag(Var->getLocation(), diag::note_previous_decl)
- << Var->getDeclName();
+ S.Diag(Var->getLocation(), diag::note_previous_decl) << Var;
}
return false;
}
@@ -16938,10 +17163,8 @@ static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var,
if (IsBlock)
S.Diag(Loc, diag::err_ref_flexarray_type);
else
- S.Diag(Loc, diag::err_lambda_capture_flexarray_type)
- << Var->getDeclName();
- S.Diag(Var->getLocation(), diag::note_previous_decl)
- << Var->getDeclName();
+ S.Diag(Loc, diag::err_lambda_capture_flexarray_type) << Var;
+ S.Diag(Var->getLocation(), diag::note_previous_decl) << Var;
}
return false;
}
@@ -16951,10 +17174,8 @@ static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var,
// variables; they don't support the expected semantics.
if (HasBlocksAttr && (IsLambda || isa<CapturedRegionScopeInfo>(CSI))) {
if (Diagnose) {
- S.Diag(Loc, diag::err_capture_block_variable)
- << Var->getDeclName() << !IsLambda;
- S.Diag(Var->getLocation(), diag::note_previous_decl)
- << Var->getDeclName();
+ S.Diag(Loc, diag::err_capture_block_variable) << Var << !IsLambda;
+ S.Diag(Var->getLocation(), diag::note_previous_decl) << Var;
}
return false;
}
@@ -16985,8 +17206,7 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
if (!Invalid && !S.getLangOpts().OpenCL && CaptureType->isArrayType()) {
if (BuildAndDiagnose) {
S.Diag(Loc, diag::err_ref_array_type);
- S.Diag(Var->getLocation(), diag::note_previous_decl)
- << Var->getDeclName();
+ S.Diag(Var->getLocation(), diag::note_previous_decl) << Var;
Invalid = true;
} else {
return false;
@@ -16999,8 +17219,7 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
if (BuildAndDiagnose) {
S.Diag(Loc, diag::err_arc_autoreleasing_capture)
<< /*block*/ 0;
- S.Diag(Var->getLocation(), diag::note_previous_decl)
- << Var->getDeclName();
+ S.Diag(Var->getLocation(), diag::note_previous_decl) << Var;
Invalid = true;
} else {
return false;
@@ -17270,9 +17489,8 @@ bool Sema::tryCaptureVariable(
if (BuildAndDiagnose) {
LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI);
if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None) {
- Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName();
- Diag(Var->getLocation(), diag::note_previous_decl)
- << Var->getDeclName();
+ Diag(ExprLoc, diag::err_lambda_impcap) << Var;
+ Diag(Var->getLocation(), diag::note_previous_decl) << Var;
Diag(LSI->Lambda->getBeginLoc(), diag::note_lambda_decl);
} else
diagnoseUncapturableValueReference(*this, ExprLoc, Var, DC);
@@ -17335,7 +17553,11 @@ bool Sema::tryCaptureVariable(
if (IsTargetCap || IsOpenMPPrivateDecl == OMPC_private ||
(IsGlobal && !IsGlobalCap)) {
Nested = !IsTargetCap;
+ bool HasConst = DeclRefType.isConstQualified();
DeclRefType = DeclRefType.getUnqualifiedType();
+ // Don't lose diagnostics about assignments to const.
+ if (HasConst)
+ DeclRefType.addConst();
CaptureType = Context.getLValueReferenceType(DeclRefType);
break;
}
@@ -17346,9 +17568,8 @@ bool Sema::tryCaptureVariable(
// No capture-default, and this is not an explicit capture
// so cannot capture this variable.
if (BuildAndDiagnose) {
- Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName();
- Diag(Var->getLocation(), diag::note_previous_decl)
- << Var->getDeclName();
+ Diag(ExprLoc, diag::err_lambda_impcap) << Var;
+ Diag(Var->getLocation(), diag::note_previous_decl) << Var;
if (cast<LambdaScopeInfo>(CSI)->Lambda)
Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getBeginLoc(),
diag::note_lambda_decl);
@@ -17868,6 +18089,24 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
if (Var->isInvalidDecl())
return;
+ // Record a CUDA/HIP static device/constant variable if it is referenced
+ // by host code. This is done conservatively: the variable is recorded when
+ // it is referenced in any of the following contexts:
+ // - a non-function context
+ // - a host function
+ // - a host device function
+ // The reference by host code must also be visible in the device compilation
+ // for the compiler to be able to externalize the static device/constant
+ // variable.
+ if (SemaRef.getASTContext().mayExternalizeStaticVar(Var)) {
+ auto *CurContext = SemaRef.CurContext;
+ if (!CurContext || !isa<FunctionDecl>(CurContext) ||
+ cast<FunctionDecl>(CurContext)->hasAttr<CUDAHostAttr>() ||
+ (!cast<FunctionDecl>(CurContext)->hasAttr<CUDADeviceAttr>() &&
+ !cast<FunctionDecl>(CurContext)->hasAttr<CUDAGlobalAttr>()))
+ SemaRef.getASTContext().CUDAStaticDeviceVarReferencedByHost.insert(Var);
+ }
+
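// A minimal CUDA sketch (hypothetical) of a reference recorded above: host
// code naming a static device variable, which the device compilation must
// then externalize.
static __device__ int counter;
__host__ void fetch(int *out) {
  cudaMemcpyFromSymbol(out, counter, sizeof(int)); // host reference to 'counter'
}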
auto *MSI = Var->getMemberSpecializationInfo();
TemplateSpecializationKind TSK = MSI ? MSI->getTemplateSpecializationKind()
: Var->getTemplateSpecializationKind();
@@ -17887,8 +18126,6 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
bool NeedDefinition =
OdrUse == OdrUseContext::Used || NeededForConstantEvaluation;
- VarTemplateSpecializationDecl *VarSpec =
- dyn_cast<VarTemplateSpecializationDecl>(Var);
assert(!isa<VarTemplatePartialSpecializationDecl>(Var) &&
"Can't instantiate a partial template specialization.");
@@ -17919,34 +18156,33 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
PointOfInstantiation = Loc;
if (MSI)
MSI->setPointOfInstantiation(PointOfInstantiation);
+ // FIXME: Notify listener.
else
Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
}
- bool InstantiationDependent = false;
- bool IsNonDependent =
- VarSpec ? !TemplateSpecializationType::anyDependentTemplateArguments(
- VarSpec->getTemplateArgsInfo(), InstantiationDependent)
- : true;
-
- // Do not instantiate specializations that are still type-dependent.
- if (IsNonDependent) {
- if (UsableInConstantExpr) {
- // Do not defer instantiations of variables that could be used in a
- // constant expression.
- SemaRef.runWithSufficientStackSpace(PointOfInstantiation, [&] {
- SemaRef.InstantiateVariableDefinition(PointOfInstantiation, Var);
- });
- } else if (FirstInstantiation ||
- isa<VarTemplateSpecializationDecl>(Var)) {
- // FIXME: For a specialization of a variable template, we don't
- // distinguish between "declaration and type implicitly instantiated"
- // and "implicit instantiation of definition requested", so we have
- // no direct way to avoid enqueueing the pending instantiation
- // multiple times.
- SemaRef.PendingInstantiations
- .push_back(std::make_pair(Var, PointOfInstantiation));
- }
+ if (UsableInConstantExpr) {
+ // Do not defer instantiations of variables that could be used in a
+ // constant expression.
+ SemaRef.runWithSufficientStackSpace(PointOfInstantiation, [&] {
+ SemaRef.InstantiateVariableDefinition(PointOfInstantiation, Var);
+ });
+
+ // Re-set the member to trigger a recomputation of the dependence bits
+ // for the expression.
+ if (auto *DRE = dyn_cast_or_null<DeclRefExpr>(E))
+ DRE->setDecl(DRE->getDecl());
+ else if (auto *ME = dyn_cast_or_null<MemberExpr>(E))
+ ME->setMemberDecl(ME->getMemberDecl());
+ } else if (FirstInstantiation ||
+ isa<VarTemplateSpecializationDecl>(Var)) {
+ // FIXME: For a specialization of a variable template, we don't
+ // distinguish between "declaration and type implicitly instantiated"
+ // and "implicit instantiation of definition requested", so we have
+ // no direct way to avoid enqueueing the pending instantiation
+ // multiple times.
+ SemaRef.PendingInstantiations
+ .push_back(std::make_pair(Var, PointOfInstantiation));
}
}
}
@@ -18072,6 +18308,9 @@ static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
}
/// Perform reference-marking and odr-use handling for a DeclRefExpr.
+///
+/// Note, this may change the dependence of the DeclRefExpr, and so needs to be
+/// handled with care if the DeclRefExpr is not newly-created.
void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
// TODO: update this with DR# once a defect report is filed.
// C++11 defect. The address of a pure member should not be an ODR use, even
@@ -18198,6 +18437,10 @@ public:
if (VD->hasLocalStorage())
return;
}
+
+ // FIXME: This can trigger the instantiation of the initializer of a
+ // variable, which can cause the expression to become value-dependent
+ // or error-dependent. Do we need to propagate the new dependence bits?
S.MarkDeclRefReferenced(E);
}
@@ -18312,7 +18555,7 @@ bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
}
S.Diag(Loc, diag::err_call_function_incomplete_return)
- << CE->getSourceRange() << FD->getDeclName() << T;
+ << CE->getSourceRange() << FD << T;
S.Diag(FD->getLocation(), diag::note_entity_declared_at)
<< FD->getDeclName();
}
@@ -18462,9 +18705,12 @@ Sema::ConditionResult Sema::ActOnCondition(Scope *S, SourceLocation Loc,
Cond = CheckSwitchCondition(Loc, SubExpr);
break;
}
- if (Cond.isInvalid())
- return ConditionError();
-
+ if (Cond.isInvalid()) {
+ Cond = CreateRecoveryExpr(SubExpr->getBeginLoc(), SubExpr->getEndLoc(),
+ {SubExpr});
+ if (!Cond.get())
+ return ConditionError();
+ }
// FIXME: FullExprArg doesn't have an invalid bit, so check nullness instead.
FullExprArg FullExpr = MakeFullExpr(Cond.get(), Loc);
if (!FullExpr.get())
@@ -18853,7 +19099,7 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
S.Context, FD->getDeclContext(), Loc, Loc,
FD->getNameInfo().getName(), DestType, FD->getTypeSourceInfo(),
SC_None, false /*isInlineSpecified*/, FD->hasPrototype(),
- /*ConstexprKind*/ CSK_unspecified);
+ /*ConstexprKind*/ ConstexprSpecKind::Unspecified);
if (FD->getQualifier())
NewFD->setQualifierInfo(FD->getQualifierLoc());
@@ -19001,7 +19247,7 @@ static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
/// Check for operands with placeholder types and complain if found.
/// Returns ExprError() if there was an error and no recovery was possible.
ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
- if (!getLangOpts().CPlusPlus) {
+ if (!Context.isDependenceAllowed()) {
// C cannot handle TypoExpr nodes on either side of a binop because it
// doesn't handle dependent types properly, so make sure any TypoExprs have
// been dealt with before checking the operands.
@@ -19078,7 +19324,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
CK_BuiltinFnToFnPtr)
.get();
return CallExpr::Create(Context, E, /*Args=*/{}, Context.IntTy,
- VK_RValue, SourceLocation());
+ VK_RValue, SourceLocation(),
+ FPOptionsOverride());
}
}
@@ -19115,6 +19362,9 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
#define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id:
#define PLACEHOLDER_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index d885920b6c14..d91db60f17a0 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -172,7 +172,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
bool Failed = false;
llvm::SmallVector<NamedDecl*, 8> FoundDecls;
- llvm::SmallSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;
+ llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;
// If we have an object type, it's because we are in a
// pseudo-destructor-expression or a member access expression, and
@@ -663,7 +663,16 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
}
// The operand is an expression.
- return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
+ ExprResult Result =
+ BuildCXXTypeId(TypeInfoType, OpLoc, (Expr *)TyOrExpr, RParenLoc);
+
+ if (!getLangOpts().RTTIData && !Result.isInvalid())
+ if (auto *CTE = dyn_cast<CXXTypeidExpr>(Result.get()))
+ if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
+ Diag(OpLoc, diag::warn_no_typeid_with_rtti_disabled)
+ << (getDiagnostics().getDiagnosticOptions().getFormat() ==
+ DiagnosticOptions::MSVC);
+ return Result;
}
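For illustration, a rough sketch of what the new warning fires on, assuming RTTI data has been disabled (clang-cl /GR- or -fno-rtti-data):

    #include <typeinfo>

    struct Base { virtual ~Base(); };

    bool same(Base &a, Base &b) {
      // Potentially-evaluated typeid on a glvalue that may not refer to the
      // most derived object: unreliable without RTTI data, hence the warning.
      return typeid(a) == typeid(b);
    }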
/// Grabs __declspec(uuid()) off a type, or returns 0 if we cannot resolve to
@@ -875,6 +884,10 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
Ex = Res.get();
}
+ // PPC MMA non-pointer types are not allowed as throw expr types.
+ if (Ex && Context.getTargetInfo().getTriple().isPPC64())
+ CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
+
return new (Context)
CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
}
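A sketch of the construct the new check rejects, assuming a ppc64 target with MMA support so that __vector_quad is available:

    void f(__vector_quad vq) {
      throw vq; // rejected: PPC MMA types may not be thrown
    }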
@@ -1389,6 +1402,9 @@ Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
if (!Result.isInvalid() && Result.get()->isInstantiationDependent() &&
!Result.get()->isTypeDependent())
Result = CorrectDelayedTyposInExpr(Result.get());
+ else if (Result.isInvalid())
+ Result = CreateRecoveryExpr(TInfo->getTypeLoc().getBeginLoc(),
+ RParenOrBraceLoc, exprs, Ty);
return Result;
}
@@ -1401,16 +1417,6 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
QualType Ty = TInfo->getType();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
- if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
- // FIXME: CXXUnresolvedConstructExpr does not model list-initialization
- // directly. We work around this by dropping the locations of the braces.
- SourceRange Locs = ListInitialization
- ? SourceRange()
- : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
- return CXXUnresolvedConstructExpr::Create(Context, TInfo, Locs.getBegin(),
- Exprs, Locs.getEnd());
- }
-
assert((!ListInitialization ||
(Exprs.size() == 1 && isa<InitListExpr>(Exprs[0]))) &&
"List initialization must have initializer list as expression.");
@@ -1439,6 +1445,17 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
}
+ if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
+ // FIXME: CXXUnresolvedConstructExpr does not model list-initialization
+ // directly. We work around this by dropping the locations of the braces.
+ SourceRange Locs = ListInitialization
+ ? SourceRange()
+ : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
+ return CXXUnresolvedConstructExpr::Create(Context, Ty.getNonReferenceType(),
+ TInfo, Locs.getBegin(), Exprs,
+ Locs.getEnd());
+ }
+
// C++ [expr.type.conv]p1:
// If the expression list is a parenthesized single expression, the type
// conversion expression is equivalent (in definedness, and if defined in
@@ -1500,7 +1517,8 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
: SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
Result = CXXFunctionalCastExpr::Create(
Context, ResultType, Expr::getValueKindForType(Ty), TInfo, CK_NoOp,
- Result.get(), /*Path=*/nullptr, Locs.getBegin(), Locs.getEnd());
+ Result.get(), /*Path=*/nullptr, CurFPFeatureOverrides(),
+ Locs.getBegin(), Locs.getEnd());
}
return Result;
@@ -1509,9 +1527,24 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
// [CUDA] Ignore this function, if we can't call it.
const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
- if (getLangOpts().CUDA &&
- IdentifyCUDAPreference(Caller, Method) <= CFP_WrongSide)
- return false;
+ if (getLangOpts().CUDA) {
+ auto CallPreference = IdentifyCUDAPreference(Caller, Method);
+ // If it's not callable at all, it's not the right function.
+ if (CallPreference < CFP_WrongSide)
+ return false;
+ if (CallPreference == CFP_WrongSide) {
+ // Maybe. We have to check if there are better alternatives.
+ DeclContext::lookup_result R =
+ Method->getDeclContext()->lookup(Method->getDeclName());
+ for (const auto *D : R) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (IdentifyCUDAPreference(Caller, FD) > CFP_WrongSide)
+ return false;
+ }
+ }
+ // We've found no better variants.
+ }
+ }
SmallVector<const FunctionDecl*, 4> PreventedBy;
bool Result = Method->isUsualDeallocationFunction(PreventedBy);
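A hedged sketch of why the extra lookup matters under CUDA (the overload set is illustrative):

    struct S {
      __device__ void operator delete(void *p);                // same side in device code
      __host__ void operator delete(void *p, __SIZE_TYPE__ n); // wrong side in device code
    };

    __device__ void g(S *s) {
      // The host overload is CFP_WrongSide here; it is disqualified as a
      // "usual" deallocation function only because a better (device-side)
      // alternative exists in the same class.
      delete s;
    }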
@@ -1753,22 +1786,22 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
if (Expr *NumElts = (Expr *)Array.NumElts) {
if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
+ // FIXME: GCC permits constant folding here. We should either do so
+ // consistently or not do so at all, rather than changing behavior in
+ // C++14 onwards.
if (getLangOpts().CPlusPlus14) {
// C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
// shall be a converted constant expression (5.19) of type std::size_t
// and shall evaluate to a strictly positive value.
- unsigned IntWidth = Context.getTargetInfo().getIntWidth();
- assert(IntWidth && "Builtin type of size 0?");
- llvm::APSInt Value(IntWidth);
+ llvm::APSInt Value(Context.getIntWidth(Context.getSizeType()));
Array.NumElts
= CheckConvertedConstantExpression(NumElts, Context.getSizeType(), Value,
- CCEK_NewExpr)
+ CCEK_ArrayBound)
.get();
} else {
- Array.NumElts
- = VerifyIntegerConstantExpression(NumElts, nullptr,
- diag::err_new_array_nonconst)
- .get();
+ Array.NumElts =
+ VerifyIntegerConstantExpression(
+ NumElts, nullptr, diag::err_new_array_nonconst, AllowFold)
+ .get();
}
if (!Array.NumElts)
return ExprError();
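A pre-C++14 sketch of the folding the FIXME describes: the bound below is not a strict integer constant expression, but it folds to a constant, which AllowFold now accepts in the same spirit as GCC:

    void h() {
      const float f = 4.0f;
      int *p = new int[(int)f]; // folds to new int[4] under AllowFold
      delete[] p;
    }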
@@ -1832,12 +1865,13 @@ void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
getASTContext().getTargetInfo().getPlatformName());
+ VersionTuple OSVersion = alignedAllocMinVersion(T.getOS());
OverloadedOperatorKind Kind = FD.getDeclName().getCXXOverloadedOperator();
bool IsDelete = Kind == OO_Delete || Kind == OO_Array_Delete;
Diag(Loc, diag::err_aligned_allocation_unavailable)
<< IsDelete << FD.getType().getAsString() << OSName
- << alignedAllocMinVersion(T.getOS()).getAsString();
+ << OSVersion.getAsString() << OSVersion.empty();
Diag(Loc, diag::note_silence_aligned_allocation_unavailable);
}
}
@@ -2073,29 +2107,29 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// per CWG1464. Otherwise, if it's not a constant, we must have an
// unparenthesized array type.
if (!(*ArraySize)->isValueDependent()) {
- llvm::APSInt Value;
// We've already performed any required implicit conversion to integer or
// unscoped enumeration type.
// FIXME: Per CWG1464, we are required to check the value prior to
// converting to size_t. This will never find a negative array size in
// C++14 onwards, because Value is always unsigned here!
- if ((*ArraySize)->isIntegerConstantExpr(Value, Context)) {
- if (Value.isSigned() && Value.isNegative()) {
+ if (Optional<llvm::APSInt> Value =
+ (*ArraySize)->getIntegerConstantExpr(Context)) {
+ if (Value->isSigned() && Value->isNegative()) {
return ExprError(Diag((*ArraySize)->getBeginLoc(),
diag::err_typecheck_negative_array_size)
<< (*ArraySize)->getSourceRange());
}
if (!AllocType->isDependentType()) {
- unsigned ActiveSizeBits =
- ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
+ unsigned ActiveSizeBits = ConstantArrayType::getNumAddressingBits(
+ Context, AllocType, *Value);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
return ExprError(
Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
- << Value.toString(10) << (*ArraySize)->getSourceRange());
+ << Value->toString(10) << (*ArraySize)->getSourceRange());
}
- KnownArraySize = Value.getZExtValue();
+ KnownArraySize = Value->getZExtValue();
} else if (TypeIdParens.isValid()) {
// Can't have dynamic array size when the type-id is in parentheses.
Diag((*ArraySize)->getBeginLoc(), diag::ext_new_paren_array_nonconst)
@@ -2203,7 +2237,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SizeTy, SourceLocation());
ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
CK_IntegralCast, &AlignmentLiteral,
- VK_RValue);
+ VK_RValue, FPOptionsOverride());
// Adjust placement args by prepending conjured size and alignment exprs.
llvm::SmallVector<Expr *, 8> CallArgs;
@@ -2611,8 +2645,24 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
if (FoundDelete.isAmbiguous())
return true; // FIXME: clean up expressions?
+ // Filter out any destroying operator deletes. We can't possibly call such a
+ // function in this context, because we're handling the case where the object
+ // was not successfully constructed.
+ // FIXME: This is not covered by the language rules yet.
+ {
+ LookupResult::Filter Filter = FoundDelete.makeFilter();
+ while (Filter.hasNext()) {
+ auto *FD = dyn_cast<FunctionDecl>(Filter.next()->getUnderlyingDecl());
+ if (FD && FD->isDestroyingOperatorDelete())
+ Filter.erase();
+ }
+ Filter.done();
+ }
+
bool FoundGlobalDelete = FoundDelete.empty();
if (FoundDelete.empty()) {
+ FoundDelete.clear(LookupOrdinaryName);
+
if (DeleteScope == AFS_Class)
return true;
@@ -3914,7 +3964,8 @@ static ExprResult BuildCXXCastArgument(Sema &S,
// Record usage of conversion in an implicit cast.
Result = ImplicitCastExpr::Create(S.Context, Result.get()->getType(),
CK_UserDefinedConversion, Result.get(),
- nullptr, Result.get()->getValueKind());
+ nullptr, Result.get()->getValueKind(),
+ S.CurFPFeatureOverrides());
return S.MaybeBindToTemporary(Result.get());
}
@@ -4095,7 +4146,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
FromType = FromAtomic->getValueType().getUnqualifiedType();
From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic,
- From, /*BasePath=*/nullptr, VK_RValue);
+ From, /*BasePath=*/nullptr, VK_RValue,
+ FPOptionsOverride());
}
break;
@@ -4322,6 +4374,12 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
+ case ICK_SVE_Vector_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_BitCast, VK_RValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
+ break;
+
case ICK_Vector_Splat: {
// Vector splat from any arithmetic type to a vector.
Expr *Elem = prepareVectorSplat(ToType, From).get();
@@ -5465,9 +5523,9 @@ static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT,
case ATT_ArrayExtent: {
llvm::APSInt Value;
uint64_t Dim;
- if (Self.VerifyIntegerConstantExpression(DimExpr, &Value,
- diag::err_dimension_expr_not_constant_integer,
- false).isInvalid())
+ if (Self.VerifyIntegerConstantExpression(
+ DimExpr, &Value, diag::err_dimension_expr_not_constant_integer)
+ .isInvalid())
return 0;
if (Value.isSigned() && Value.isNegative()) {
Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer)
@@ -6304,6 +6362,8 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// Similarly, attempt to find composite type of two objective-c pointers.
Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
if (!Composite.isNull())
return Composite;
@@ -6529,12 +6589,16 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
// FIXME: In C, we merge __strong and none to __strong at the top level.
if (Q1.getObjCGCAttr() == Q2.getObjCGCAttr())
Quals.setObjCGCAttr(Q1.getObjCGCAttr());
+ else if (T1->isVoidPointerType() || T2->isVoidPointerType())
+ assert(Steps.size() == 1);
else
return QualType();
// Mismatched lifetime qualifiers never compatibly include each other.
if (Q1.getObjCLifetime() == Q2.getObjCLifetime())
Quals.setObjCLifetime(Q1.getObjCLifetime());
+ else if (T1->isVoidPointerType() || T2->isVoidPointerType())
+ assert(Steps.size() == 1);
else
return QualType();
@@ -6833,7 +6897,7 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
: CK_ARCReclaimReturnedObject);
return ImplicitCastExpr::Create(Context, E->getType(), ck, E, nullptr,
- VK_RValue);
+ VK_RValue, FPOptionsOverride());
}
if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
@@ -7226,8 +7290,8 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
return Base;
}
-static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
- tok::TokenKind& OpKind, SourceLocation OpLoc) {
+static bool CheckArrow(Sema &S, QualType &ObjectType, Expr *&Base,
+ tok::TokenKind &OpKind, SourceLocation OpLoc) {
if (Base->hasPlaceholderType()) {
ExprResult result = S.CheckPlaceholderExpr(Base);
if (result.isInvalid()) return true;
@@ -7242,6 +7306,18 @@ static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
// Note that this is rather different from the normal handling for the
// arrow operator.
if (OpKind == tok::arrow) {
+ // The operator requires a prvalue, so perform lvalue conversions.
+ // Only do this if we might plausibly end with a pointer, as otherwise
+ // this was likely to be intended to be a '.'.
+ if (ObjectType->isPointerType() || ObjectType->isArrayType() ||
+ ObjectType->isFunctionType()) {
+ ExprResult BaseResult = S.DefaultFunctionArrayLvalueConversion(Base);
+ if (BaseResult.isInvalid())
+ return true;
+ Base = BaseResult.get();
+ ObjectType = Base->getType();
+ }
+
if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
ObjectType = Ptr->getPointeeType();
} else if (!Base->isTypeDependent()) {
@@ -7550,6 +7626,11 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
return ExprError();
+ if (DS.getTypeSpecType() == DeclSpec::TST_decltype_auto) {
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_decltype_auto_invalid);
+ return true;
+ }
+
QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc(),
false);
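A minimal example of what the new guard rejects:

    void f(int *p) {
      p->~decltype(auto)(); // error: 'decltype(auto)' is not allowed here
    }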
@@ -7616,7 +7697,8 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
ResultType = ResultType.getNonLValueExprType(Context);
CXXMemberCallExpr *CE = CXXMemberCallExpr::Create(
- Context, ME, /*Args=*/{}, ResultType, VK, Exp.get()->getEndLoc());
+ Context, ME, /*Args=*/{}, ResultType, VK, Exp.get()->getEndLoc(),
+ CurFPFeatureOverrides());
if (CheckFunctionCall(Method, CE,
Method->getType()->castAs<FunctionProtoType>()))
@@ -7640,7 +7722,8 @@ ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
Operand = R.get();
- if (!inTemplateInstantiation() && Operand->HasSideEffects(Context, false)) {
+ if (!inTemplateInstantiation() && !Operand->isInstantiationDependent() &&
+ Operand->HasSideEffects(Context, false)) {
// The expression operand for noexcept is in an unevaluated expression
// context, so side effects could result in unintended consequences.
Diag(Operand->getExprLoc(), diag::warn_side_effects_unevaluated_context);
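A sketch of the suppression: the side-effect warning is deferred while the operand is instantiation-dependent, since the effect may vanish after substitution:

    int i = 0;
    bool b1 = noexcept(i++);   // warns: side effects in an unevaluated context

    template <typename T> bool g(T t) {
      return noexcept(t++);    // instantiation-dependent: no warning here
    }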
@@ -7977,19 +8060,26 @@ class TransformTypos : public TreeTransform<TransformTypos> {
}
}
- /// If corrections for the first TypoExpr have been exhausted for a
- /// given combination of the other TypoExprs, retry those corrections against
- /// the next combination of substitutions for the other TypoExprs by advancing
- /// to the next potential correction of the second TypoExpr. For the second
- /// and subsequent TypoExprs, if its stream of corrections has been exhausted,
- /// the stream is reset and the next TypoExpr's stream is advanced by one (a
- /// TypoExpr's correction stream is advanced by removing the TypoExpr from the
- /// TransformCache). Returns true if there is still any untried combinations
- /// of corrections.
+ /// Try to advance the typo correction state of the first unfinished TypoExpr.
+ /// We allow advancement of the correction stream by removing it from the
+ /// TransformCache which allows `TransformTypoExpr` to advance during the
+ /// next transformation attempt.
+ ///
+ /// Any substitution attempts for the previous TypoExprs (which must have been
+ /// finished) will need to be retried since it's possible that they will now
+ /// be invalid given the latest advancement.
+ ///
+ /// We need to be sure that we're making progress - it's possible that the
+ /// tree is so malformed that the transform never makes it to the
+ /// `TransformTypoExpr`.
+ ///
+ /// Returns true if there are any untried correction combinations.
bool CheckAndAdvanceTypoExprCorrectionStreams() {
for (auto TE : TypoExprs) {
auto &State = SemaRef.getTypoExprState(TE);
TransformCache.erase(TE);
+ if (!State.Consumer->hasMadeAnyCorrectionProgress())
+ return false;
if (!State.Consumer->finished())
return true;
State.Consumer->resetCorrectionStream();
@@ -8639,6 +8729,9 @@ Sema::ActOnRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc) {
- return RequiresExpr::Create(Context, RequiresKWLoc, Body, LocalParameters,
- Requirements, ClosingBraceLoc);
+ auto *RE = RequiresExpr::Create(Context, RequiresKWLoc, Body, LocalParameters,
+ Requirements, ClosingBraceLoc);
+ if (DiagnoseUnexpandedParameterPackInRequiresExpr(RE))
+ return ExprError();
+ return RE;
}
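A sketch of the newly diagnosed case, assuming a C++20 compile:

    template <typename... Ts>
    void f() {
      // error: parameter pack 'Ts' is never expanded inside the
      // requires-expression
      static_assert(requires { sizeof(Ts); });
    }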
diff --git a/clang/lib/Sema/SemaExprMember.cpp b/clang/lib/Sema/SemaExprMember.cpp
index ebfc1ec4b974..f5afcb76fc96 100644
--- a/clang/lib/Sema/SemaExprMember.cpp
+++ b/clang/lib/Sema/SemaExprMember.cpp
@@ -231,12 +231,10 @@ static void diagnoseInstanceReference(Sema &SemaRef,
}
/// Builds an expression which might be an implicit member expression.
-ExprResult
-Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- LookupResult &R,
- const TemplateArgumentListInfo *TemplateArgs,
- const Scope *S) {
+ExprResult Sema::BuildPossibleImplicitMemberExpr(
+ const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
+ UnresolvedLookupExpr *AsULE) {
switch (ClassifyImplicitMemberAccess(*this, R)) {
case IMA_Instance:
return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true, S);
@@ -257,7 +255,7 @@ Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
case IMA_Unresolved_StaticContext:
if (TemplateArgs || TemplateKWLoc.isValid())
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs);
- return BuildDeclarationNameExpr(SS, R, false);
+ return AsULE ? AsULE : BuildDeclarationNameExpr(SS, R, false);
case IMA_Error_StaticContext:
case IMA_Error_Unrelated:
@@ -946,28 +944,6 @@ static bool IsInFnTryBlockHandler(const Scope *S) {
return false;
}
-VarDecl *
-Sema::getVarTemplateSpecialization(VarTemplateDecl *VarTempl,
- const TemplateArgumentListInfo *TemplateArgs,
- const DeclarationNameInfo &MemberNameInfo,
- SourceLocation TemplateKWLoc) {
- if (!TemplateArgs) {
- diagnoseMissingTemplateArguments(TemplateName(VarTempl),
- MemberNameInfo.getBeginLoc());
- return nullptr;
- }
-
- DeclResult VDecl = CheckVarTemplateId(VarTempl, TemplateKWLoc,
- MemberNameInfo.getLoc(), *TemplateArgs);
- if (VDecl.isInvalid())
- return nullptr;
- VarDecl *Var = cast<VarDecl>(VDecl.get());
- if (!Var->getTemplateSpecializationKind())
- Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation,
- MemberNameInfo.getLoc());
- return Var;
-}
-
ExprResult
Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
SourceLocation OpLoc, bool IsArrow,
@@ -1099,19 +1075,11 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
if (!BaseExpr) {
// If this is not an instance member, convert to a non-member access.
if (!MemberDecl->isCXXInstanceMember()) {
- // If this is a variable template, get the instantiated variable
- // declaration corresponding to the supplied template arguments
- // (while emitting diagnostics as necessary) that will be referenced
- // by this expression.
- assert((!TemplateArgs || isa<VarTemplateDecl>(MemberDecl)) &&
- "How did we get template arguments here sans a variable template");
- if (isa<VarTemplateDecl>(MemberDecl)) {
- MemberDecl = getVarTemplateSpecialization(
- cast<VarTemplateDecl>(MemberDecl), TemplateArgs,
- R.getLookupNameInfo(), TemplateKWLoc);
- if (!MemberDecl)
- return ExprError();
- }
+ // We might have a variable template specialization (or maybe one day a
+ // member concept-id).
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/false, TemplateArgs);
+
return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), MemberDecl,
FoundDecl, TemplateArgs);
}
@@ -1170,14 +1138,32 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
MemberNameInfo, Enum->getType(), VK_RValue,
OK_Ordinary);
}
+
if (VarTemplateDecl *VarTempl = dyn_cast<VarTemplateDecl>(MemberDecl)) {
- if (VarDecl *Var = getVarTemplateSpecialization(
- VarTempl, TemplateArgs, MemberNameInfo, TemplateKWLoc))
- return BuildMemberExpr(
- BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var, FoundDecl,
- /*HadMultipleCandidates=*/false, MemberNameInfo,
- Var->getType().getNonReferenceType(), VK_LValue, OK_Ordinary);
- return ExprError();
+ if (!TemplateArgs) {
+ diagnoseMissingTemplateArguments(TemplateName(VarTempl), MemberLoc);
+ return ExprError();
+ }
+
+ DeclResult VDecl = CheckVarTemplateId(VarTempl, TemplateKWLoc,
+ MemberNameInfo.getLoc(), *TemplateArgs);
+ if (VDecl.isInvalid())
+ return ExprError();
+
+ // Non-dependent member, but dependent template arguments.
+ if (!VDecl.get())
+ return ActOnDependentMemberExpr(
+ BaseExpr, BaseExpr->getType(), IsArrow, OpLoc, SS, TemplateKWLoc,
+ FirstQualifierInScope, MemberNameInfo, TemplateArgs);
+
+ VarDecl *Var = cast<VarDecl>(VDecl.get());
+ if (!Var->getTemplateSpecializationKind())
+ Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation, MemberLoc);
+
+ return BuildMemberExpr(
+ BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var, FoundDecl,
+ /*HadMultipleCandidates=*/false, MemberNameInfo,
+ Var->getType().getNonReferenceType(), VK_LValue, OK_Ordinary);
}
// We found something that we didn't expect. Complain.
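A sketch of the rerouted path (the class is hypothetical): member access to a variable template now goes through CheckVarTemplateId, falling back to a dependent member expression when the arguments are dependent:

    struct S {
      template <typename T> static constexpr T zero = T(0);
    };

    double d = S().zero<double>; // builds a MemberExpr to the specialization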
@@ -1748,14 +1734,28 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
}
void Sema::CheckMemberAccessOfNoDeref(const MemberExpr *E) {
- QualType ResultTy = E->getType();
-
- // Do not warn on member accesses to arrays since this returns an array
- // lvalue and does not actually dereference memory.
- if (isa<ArrayType>(ResultTy))
+ if (isUnevaluatedContext())
return;
- if (E->isArrow()) {
+ QualType ResultTy = E->getType();
+
+ // Member accesses have four cases:
+ // 1: non-array member via "->": dereferences
+ // 2: non-array member via ".": nothing interesting happens
+ // 3: array member access via "->": nothing interesting happens
+ // (this returns an array lvalue and does not actually dereference memory)
+ // 4: array member access via ".": *adds* a layer of indirection
+ if (ResultTy->isArrayType()) {
+ if (!E->isArrow()) {
+ // This might be something like:
+ // (*structPtr).arrayMember
+ // which behaves roughly like:
+ // &(*structPtr).pointerMember
+ // in that the apparent dereference in the base expression does not
+ // actually happen.
+ CheckAddressOfNoDeref(E->getBase());
+ }
+ } else if (E->isArrow()) {
if (const auto *Ptr = dyn_cast<PointerType>(
E->getBase()->getType().getDesugaredType(Context))) {
if (Ptr->getPointeeType()->hasAttr(attr::NoDeref))
@@ -1810,6 +1810,14 @@ Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
Qualifiers Combined = BaseQuals + MemberQuals;
if (Combined != MemberQuals)
MemberType = Context.getQualifiedType(MemberType, Combined);
+
+ // Pick up NoDeref from the base in case we end up using AddrOf on the
+ // result. E.g. the expression
+ // &someNoDerefPtr->pointerMember
+ // should be a noderef pointer again.
+ if (BaseType->hasAttr(attr::NoDeref))
+ MemberType =
+ Context.getAttributedType(attr::NoDeref, MemberType, MemberType);
}
auto *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
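A rough sketch of the noderef handling above (attribute placement is illustrative):

    struct P { int *pointerMember; int arr[4]; };

    void f(P __attribute__((noderef)) *p) {
      auto a = &p->pointerMember; // the member inherits noderef, so the
                                  // address-of result is noderef again
      int (*b)[4] = &p->arr;      // case 3: '->' to an array member performs
                                  // no actual dereference
    }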
@@ -1854,7 +1862,6 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
// If this is known to be an instance access, go ahead and build an
// implicit 'this' expression now.
- // 'this' expression now.
QualType ThisTy = getCurrentThisType();
assert(!ThisTy.isNull() && "didn't correctly pre-flight capture of 'this'");
diff --git a/clang/lib/Sema/SemaExprObjC.cpp b/clang/lib/Sema/SemaExprObjC.cpp
index 228a1ec3ba1f..f5456ee0711e 100644
--- a/clang/lib/Sema/SemaExprObjC.cpp
+++ b/clang/lib/Sema/SemaExprObjC.cpp
@@ -1394,6 +1394,9 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
return true;
}
+ if (PDecl->isNonRuntimeProtocol())
+ Diag(ProtoLoc, diag::err_objc_non_runtime_protocol_in_protocol_expr)
+ << PDecl;
if (!PDecl->hasDefinition()) {
Diag(ProtoLoc, diag::err_atprotocol_protocol) << PDecl;
Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl;
@@ -1560,12 +1563,20 @@ QualType Sema::getMessageSendResultType(const Expr *Receiver,
// Map the nullability of the result into a table index.
unsigned receiverNullabilityIdx = 0;
- if (auto nullability = ReceiverType->getNullability(Context))
+ if (Optional<NullabilityKind> nullability =
+ ReceiverType->getNullability(Context)) {
+ if (*nullability == NullabilityKind::NullableResult)
+ nullability = NullabilityKind::Nullable;
receiverNullabilityIdx = 1 + static_cast<unsigned>(*nullability);
+ }
unsigned resultNullabilityIdx = 0;
- if (auto nullability = resultType->getNullability(Context))
+ if (Optional<NullabilityKind> nullability =
+ resultType->getNullability(Context)) {
+ if (*nullability == NullabilityKind::NullableResult)
+ nullability = NullabilityKind::Nullable;
resultNullabilityIdx = 1 + static_cast<unsigned>(*nullability);
+ }
// The table of nullability mappings, indexed by the receiver's nullability
// and then the result type's nullability.
@@ -2445,8 +2456,8 @@ static void applyCocoaAPICheck(Sema &S, const ObjCMessageExpr *Msg,
SourceManager &SM = S.SourceMgr;
edit::Commit ECommit(SM, S.LangOpts);
if (refactor(Msg,*S.NSAPIObj, ECommit)) {
- DiagnosticBuilder Builder = S.Diag(MsgLoc, DiagID)
- << Msg->getSelector() << Msg->getSourceRange();
+ auto Builder = S.Diag(MsgLoc, DiagID)
+ << Msg->getSelector() << Msg->getSourceRange();
// FIXME: Don't emit diagnostic at all if fixits are non-commitable.
if (!ECommit.isCommitable())
return;
@@ -3139,9 +3150,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (ReceiverType->isObjCClassType() && !isImplicit &&
!(Receiver->isObjCSelfExpr() && getLangOpts().ObjCAutoRefCount)) {
{
- DiagnosticBuilder Builder =
- Diag(Receiver->getExprLoc(),
- diag::err_messaging_class_with_direct_method);
+ auto Builder = Diag(Receiver->getExprLoc(),
+ diag::err_messaging_class_with_direct_method);
if (Receiver->isObjCSelfExpr()) {
Builder.AddFixItHint(FixItHint::CreateReplacement(
RecRange, Method->getClassInterface()->getName()));
@@ -3153,7 +3163,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (SuperLoc.isValid()) {
{
- DiagnosticBuilder Builder =
+ auto Builder =
Diag(SuperLoc, diag::err_messaging_super_with_direct_method);
if (ReceiverType->isObjCClassType()) {
Builder.AddFixItHint(FixItHint::CreateReplacement(
@@ -3736,15 +3746,11 @@ bool Sema::isKnownName(StringRef name) {
return LookupName(R, TUScope, false);
}
-static void addFixitForObjCARCConversion(Sema &S,
- DiagnosticBuilder &DiagB,
- Sema::CheckedConversionKind CCK,
- SourceLocation afterLParen,
- QualType castType,
- Expr *castExpr,
- Expr *realCast,
- const char *bridgeKeyword,
- const char *CFBridgeName) {
+template <typename DiagBuilderT>
+static void addFixitForObjCARCConversion(
+ Sema &S, DiagBuilderT &DiagB, Sema::CheckedConversionKind CCK,
+ SourceLocation afterLParen, QualType castType, Expr *castExpr,
+ Expr *realCast, const char *bridgeKeyword, const char *CFBridgeName) {
// We handle C-style and implicit casts here.
switch (CCK) {
case Sema::CCK_ImplicitConversion:
@@ -3921,9 +3927,9 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
if (CreateRule != ACC_plusOne)
{
- DiagnosticBuilder DiagB =
- (CCK != Sema::CCK_OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge)
- : S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
+ auto DiagB = (CCK != Sema::CCK_OtherCast)
+ ? S.Diag(noteLoc, diag::note_arc_bridge)
+ : S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, realCast, "__bridge ",
@@ -3931,12 +3937,12 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
}
if (CreateRule != ACC_plusZero)
{
- DiagnosticBuilder DiagB =
- (CCK == Sema::CCK_OtherCast && !br) ?
- S.Diag(noteLoc, diag::note_arc_cstyle_bridge_transfer) << castExprType :
- S.Diag(br ? castExpr->getExprLoc() : noteLoc,
- diag::note_arc_bridge_transfer)
- << castExprType << br;
+ auto DiagB = (CCK == Sema::CCK_OtherCast && !br)
+ ? S.Diag(noteLoc, diag::note_arc_cstyle_bridge_transfer)
+ << castExprType
+ : S.Diag(br ? castExpr->getExprLoc() : noteLoc,
+ diag::note_arc_bridge_transfer)
+ << castExprType << br;
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, realCast, "__bridge_transfer ",
@@ -3962,21 +3968,21 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
if (CreateRule != ACC_plusOne)
{
- DiagnosticBuilder DiagB =
- (CCK != Sema::CCK_OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge)
- : S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
+ auto DiagB = (CCK != Sema::CCK_OtherCast)
+ ? S.Diag(noteLoc, diag::note_arc_bridge)
+ : S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, realCast, "__bridge ",
nullptr);
}
if (CreateRule != ACC_plusZero)
{
- DiagnosticBuilder DiagB =
- (CCK == Sema::CCK_OtherCast && !br) ?
- S.Diag(noteLoc, diag::note_arc_cstyle_bridge_retained) << castType :
- S.Diag(br ? castExpr->getExprLoc() : noteLoc,
- diag::note_arc_bridge_retained)
- << castType << br;
+ auto DiagB = (CCK == Sema::CCK_OtherCast && !br)
+ ? S.Diag(noteLoc, diag::note_arc_cstyle_bridge_retained)
+ << castType
+ : S.Diag(br ? castExpr->getExprLoc() : noteLoc,
+ diag::note_arc_bridge_retained)
+ << castType << br;
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, realCast, "__bridge_retained ",
@@ -4462,8 +4468,8 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
// If the result is +1, consume it here.
case ACC_plusOne:
castExpr = ImplicitCastExpr::Create(Context, castExpr->getType(),
- CK_ARCConsumeObject, castExpr,
- nullptr, VK_RValue);
+ CK_ARCConsumeObject, castExpr, nullptr,
+ VK_RValue, FPOptionsOverride());
Cleanup.setExprNeedsCleanups(true);
return ACR_okay;
}
@@ -4689,9 +4695,9 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
case OBC_BridgeRetained:
// Produce the object before casting it.
- SubExpr = ImplicitCastExpr::Create(Context, FromType,
- CK_ARCProduceObject,
- SubExpr, nullptr, VK_RValue);
+ SubExpr = ImplicitCastExpr::Create(Context, FromType, CK_ARCProduceObject,
+ SubExpr, nullptr, VK_RValue,
+ FPOptionsOverride());
break;
case OBC_BridgeTransfer: {
@@ -4730,7 +4736,7 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
if (MustConsume) {
Cleanup.setExprNeedsCleanups(true);
Result = ImplicitCastExpr::Create(Context, T, CK_ARCConsumeObject, Result,
- nullptr, VK_RValue);
+ nullptr, VK_RValue, FPOptionsOverride());
}
return Result;
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index eb07de65d266..f4493d84238d 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -141,6 +141,10 @@ static StringInitFailureKind IsStringInit(Expr *init, QualType declType,
return IsStringInit(init, arrayType, Context);
}
+bool Sema::IsStringInit(Expr *Init, const ArrayType *AT) {
+ return ::IsStringInit(Init, AT, Context) == SIF_None;
+}
+
/// Update the type of a string literal, including any surrounding parentheses,
/// to match the type of the object which it is initializing.
static void updateStringLiteralType(Expr *E, QualType Ty) {
@@ -962,6 +966,8 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
FillInEmptyInitializations(Entity, FullyStructuredList,
RequiresSecondPass, nullptr, 0);
}
+ if (hadError && FullyStructuredList)
+ FullyStructuredList->markError();
}
int InitListChecker::numArrayElements(QualType DeclType) {
@@ -1117,6 +1123,7 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_TemplateParameter:
case InitializedEntity::EK_Result:
// Extra braces here are suspicious.
DiagID = diag::warn_braces_around_init;
@@ -1583,10 +1590,7 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
IList->setInit(Index, ResultExpr);
}
}
- if (hadError)
- ++StructuredIndex;
- else
- UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
+ UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
++Index;
}
@@ -1641,10 +1645,7 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
if (!VerifyOnly && expr)
IList->setInit(Index, expr);
- if (hadError)
- ++StructuredIndex;
- else
- UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
++Index;
}
@@ -1695,11 +1696,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
IList->setInit(Index, ResultExpr);
}
}
- if (hadError)
- ++StructuredIndex;
- else
- UpdateStructuredListElement(StructuredList, StructuredIndex,
- ResultExpr);
+ UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
++Index;
return;
}
@@ -2898,8 +2895,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Expr *Init = new (Context) IntegerLiteral(
Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
if (CharTy != PromotedCharTy)
- Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
- Init, nullptr, VK_RValue);
+ Init =
+ ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast, Init,
+ nullptr, VK_RValue, FPOptionsOverride());
StructuredList->updateInit(Context, i, Init);
}
} else {
@@ -2920,8 +2918,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Expr *Init = new (Context) IntegerLiteral(
Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
if (CharTy != PromotedCharTy)
- Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
- Init, nullptr, VK_RValue);
+ Init =
+ ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast, Init,
+ nullptr, VK_RValue, FPOptionsOverride());
StructuredList->updateInit(Context, i, Init);
}
}
@@ -3098,8 +3097,12 @@ void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
if (Expr *PrevInit = StructuredList->updateInit(SemaRef.Context,
StructuredIndex, expr)) {
- // This initializer overwrites a previous initializer. Warn.
- diagnoseInitOverride(PrevInit, expr->getSourceRange());
+ // This initializer overwrites a previous initializer.
+ // No need to diagnose when `expr` is nullptr because a more relevant
+ // diagnostic has already been issued and this diagnostic is potentially
+ // noise.
+ if (expr)
+ diagnoseInitOverride(PrevInit, expr->getSourceRange());
}
++StructuredIndex;
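For context, a sketch of the override diagnostic this guard belongs to (using clang's designated-initializer support); the null check merely keeps it quiet when the overriding initializer was already dropped by an earlier error:

    struct Pt { int x, y; };
    Pt p = {.x = 1, .x = 2}; // warning: initializer overrides prior
                             // initialization of this subobject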
@@ -3128,7 +3131,8 @@ CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
SourceLocation Loc = Index->getBeginLoc();
// Make sure this is an integer constant expression.
- ExprResult Result = S.VerifyIntegerConstantExpression(Index, &Value);
+ ExprResult Result =
+ S.VerifyIntegerConstantExpression(Index, &Value, Sema::AllowFold);
if (Result.isInvalid())
return Result;
@@ -3280,6 +3284,7 @@ DeclarationName InitializedEntity::getName() const {
case EK_Variable:
case EK_Member:
case EK_Binding:
+ case EK_TemplateParameter:
return Variable.VariableOrMember->getDeclName();
case EK_LambdaCapture:
@@ -3310,6 +3315,7 @@ ValueDecl *InitializedEntity::getDecl() const {
case EK_Variable:
case EK_Member:
case EK_Binding:
+ case EK_TemplateParameter:
return Variable.VariableOrMember;
case EK_Parameter:
@@ -3347,6 +3353,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Variable:
case EK_Parameter:
case EK_Parameter_CF_Audited:
+ case EK_TemplateParameter:
case EK_Member:
case EK_Binding:
case EK_New:
@@ -3378,6 +3385,7 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
case EK_Parameter: OS << "Parameter"; break;
case EK_Parameter_CF_Audited: OS << "CF audited function Parameter";
break;
+ case EK_TemplateParameter: OS << "TemplateParameter"; break;
case EK_Result: OS << "Result"; break;
case EK_StmtExprResult: OS << "StmtExprResult"; break;
case EK_Exception: OS << "Exception"; break;
@@ -3434,6 +3442,7 @@ void InitializationSequence::Step::Destroy() {
case SK_QualificationConversionRValue:
case SK_QualificationConversionXValue:
case SK_QualificationConversionLValue:
+ case SK_FunctionReferenceConversion:
case SK_AtomicConversion:
case SK_ListInitialization:
case SK_UnwrapInitList:
@@ -3612,6 +3621,13 @@ void InitializationSequence::AddQualificationConversionStep(QualType Ty,
Steps.push_back(S);
}
+void InitializationSequence::AddFunctionReferenceConversionStep(QualType Ty) {
+ Step S;
+ S.Kind = SK_FunctionReferenceConversion;
+ S.Type = Ty;
+ Steps.push_back(S);
+}
+
void InitializationSequence::AddAtomicConversionStep(QualType Ty) {
Step S;
S.Kind = SK_AtomicConversion;
@@ -4099,11 +4115,13 @@ static void TryConstructorInitialization(Sema &S,
IsListInit);
}
if (Result) {
- Sequence.SetOverloadFailure(IsListInit ?
- InitializationSequence::FK_ListConstructorOverloadFailed :
- InitializationSequence::FK_ConstructorOverloadFailed,
- Result);
- return;
+ Sequence.SetOverloadFailure(
+ IsListInit ? InitializationSequence::FK_ListConstructorOverloadFailed
+ : InitializationSequence::FK_ConstructorOverloadFailed,
+ Result);
+
+ if (Result != OR_Deleted)
+ return;
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
@@ -4124,31 +4142,45 @@ static void TryConstructorInitialization(Sema &S,
return;
}
- // C++11 [dcl.init]p6:
- // If a program calls for the default initialization of an object
- // of a const-qualified type T, T shall be a class type with a
- // user-provided default constructor.
- // C++ core issue 253 proposal:
- // If the implicit default constructor initializes all subobjects, no
- // initializer should be required.
- // The 253 proposal is for example needed to process libstdc++ headers in 5.x.
CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
- if (Kind.getKind() == InitializationKind::IK_Default &&
- Entity.getType().isConstQualified()) {
- if (!CtorDecl->getParent()->allowConstDefaultInit()) {
- if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity))
- Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ if (Result != OR_Deleted) {
+ // C++11 [dcl.init]p6:
+ // If a program calls for the default initialization of an object
+ // of a const-qualified type T, T shall be a class type with a
+ // user-provided default constructor.
+ // C++ core issue 253 proposal:
+ // If the implicit default constructor initializes all subobjects, no
+ // initializer should be required.
+ // The 253 proposal is for example needed to process libstdc++ headers
+ // in 5.x.
+ if (Kind.getKind() == InitializationKind::IK_Default &&
+ Entity.getType().isConstQualified()) {
+ if (!CtorDecl->getParent()->allowConstDefaultInit()) {
+ if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity))
+ Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ return;
+ }
+ }
+
+ // C++11 [over.match.list]p1:
+ // In copy-list-initialization, if an explicit constructor is chosen, the
+ // initializer is ill-formed.
+ if (IsListInit && !Kind.AllowExplicit() && CtorDecl->isExplicit()) {
+ Sequence.SetFailed(InitializationSequence::FK_ExplicitConstructor);
return;
}
}
- // C++11 [over.match.list]p1:
- // In copy-list-initialization, if an explicit constructor is chosen, the
- // initializer is ill-formed.
- if (IsListInit && !Kind.AllowExplicit() && CtorDecl->isExplicit()) {
- Sequence.SetFailed(InitializationSequence::FK_ExplicitConstructor);
+ // [class.copy.elision]p3:
+ // In some copy-initialization contexts, a two-stage overload resolution
+ // is performed.
+ // If the first overload resolution selects a deleted function, we also
+ // need the initialization sequence to decide whether to perform the second
+ // overload resolution.
+ // For deleted functions in other contexts, there is no need to get the
+ // initialization sequence.
+ if (Result == OR_Deleted && Kind.getKind() != InitializationKind::IK_Copy)
return;
- }
// Add the constructor initialization step. Any cv-qualification conversion is
// subsumed by the initialization.
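A hedged sketch of the two-stage situation being kept alive, under pre-C++17 semantics:

    struct B {
      B();
      B(const B &);
      B(B &&) = delete;
    };

    B f() {
      B b;
      return b; // the first resolution treats 'b' as an rvalue and selects the
                // deleted move constructor; the sequence is still built so the
                // caller can decide whether the second, as-an-lvalue resolution
                // applies
    }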
@@ -4232,7 +4264,7 @@ static void TryReferenceListInitialization(Sema &S,
// bind to that.
if (InitList->getNumInits() == 1) {
Expr *Initializer = InitList->getInit(0);
- QualType cv2T2 = Initializer->getType();
+ QualType cv2T2 = S.getCompletedType(Initializer);
Qualifiers T2Quals;
QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
@@ -4645,7 +4677,7 @@ static OverloadingResult TryRefInitWithConversionFunction(
else if (RefConv & Sema::ReferenceConversions::ObjC)
Sequence.AddObjCObjectConversionStep(cv1T1);
else if (RefConv & Sema::ReferenceConversions::Function)
- Sequence.AddQualificationConversionStep(cv1T1, VK);
+ Sequence.AddFunctionReferenceConversionStep(cv1T1);
else if (RefConv & Sema::ReferenceConversions::Qualification) {
if (!S.Context.hasSameType(cv1T4, cv1T1))
Sequence.AddQualificationConversionStep(cv1T1, VK);
@@ -4668,7 +4700,7 @@ static void TryReferenceInitialization(Sema &S,
QualType cv1T1 = DestType->castAs<ReferenceType>()->getPointeeType();
Qualifiers T1Quals;
QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);
- QualType cv2T2 = Initializer->getType();
+ QualType cv2T2 = S.getCompletedType(Initializer);
Qualifiers T2Quals;
QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
@@ -4747,12 +4779,12 @@ static void TryReferenceInitializationCore(Sema &S,
Sequence.AddDerivedToBaseCastStep(cv1T1, VK_LValue);
else
Sequence.AddObjCObjectConversionStep(cv1T1);
- } else if (RefConv & (Sema::ReferenceConversions::Qualification |
- Sema::ReferenceConversions::Function)) {
+ } else if (RefConv & Sema::ReferenceConversions::Qualification) {
// Perform a (possibly multi-level) qualification conversion.
- // FIXME: Should we use a different step kind for function conversions?
Sequence.AddQualificationConversionStep(cv1T1,
Initializer->getValueKind());
+ } else if (RefConv & Sema::ReferenceConversions::Function) {
+ Sequence.AddFunctionReferenceConversionStep(cv1T1);
}
// We only create a temporary here when binding a reference to a
@@ -5242,9 +5274,17 @@ static void TryUserDefinedConversion(Sema &S,
if (OverloadingResult Result
= CandidateSet.BestViableFunction(S, DeclLoc, Best)) {
Sequence.SetOverloadFailure(
- InitializationSequence::FK_UserConversionOverloadFailed,
- Result);
- return;
+ InitializationSequence::FK_UserConversionOverloadFailed, Result);
+
+ // [class.copy.elision]p3:
+ // In some copy-initialization contexts, a two-stage overload resolution
+ // is performed.
+ // If the first overload resolution selects a deleted function, we also
+ // need the initialization sequence to decide whether to perform the second
+ // overload resolution.
+ if (!(Result == OR_Deleted &&
+ Kind.getKind() == InitializationKind::IK_Copy))
+ return;
}
FunctionDecl *Function = Best->Function;
@@ -5548,13 +5588,11 @@ static bool TryOCLZeroOpaqueTypeInitialization(Sema &S,
return false;
}
-InitializationSequence::InitializationSequence(Sema &S,
- const InitializedEntity &Entity,
- const InitializationKind &Kind,
- MultiExprArg Args,
- bool TopLevelOfInitList,
- bool TreatUnavailableAsInvalid)
- : FailedCandidateSet(Kind.getLocation(), OverloadCandidateSet::CSK_Normal) {
+InitializationSequence::InitializationSequence(
+ Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind,
+ MultiExprArg Args, bool TopLevelOfInitList, bool TreatUnavailableAsInvalid)
+ : FailedOverloadResult(OR_Success),
+ FailedCandidateSet(Kind.getLocation(), OverloadCandidateSet::CSK_Normal) {
InitializeFrom(S, Entity, Kind, Args, TopLevelOfInitList,
TreatUnavailableAsInvalid);
}
@@ -5998,6 +6036,11 @@ getAssignmentAction(const InitializedEntity &Entity, bool Diagnose = false) {
// FIXME: Can we tell apart casting vs. converting?
return Sema::AA_Casting;
+ case InitializedEntity::EK_TemplateParameter:
+ // This is really initialization, but refer to it as conversion for
+ // consistency with CheckConvertedConstantExpression.
+ return Sema::AA_Converting;
+
case InitializedEntity::EK_Member:
case InitializedEntity::EK_Binding:
case InitializedEntity::EK_ArrayElement:
@@ -6032,6 +6075,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
case InitializedEntity::EK_LambdaCapture:
case InitializedEntity::EK_CompoundLiteralInit:
+ case InitializedEntity::EK_TemplateParameter:
return false;
case InitializedEntity::EK_Parameter:
@@ -6066,6 +6110,7 @@ static bool shouldDestroyEntity(const InitializedEntity &Entity) {
case InitializedEntity::EK_Variable:
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_TemplateParameter:
case InitializedEntity::EK_Temporary:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_Exception:
@@ -6099,6 +6144,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
case InitializedEntity::EK_Member:
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_TemplateParameter:
case InitializedEntity::EK_Temporary:
case InitializedEntity::EK_New:
case InitializedEntity::EK_Base:
@@ -6342,7 +6388,7 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
void InitializationSequence::PrintInitLocationNote(Sema &S,
const InitializedEntity &Entity) {
- if (Entity.isParameterKind() && Entity.getDecl()) {
+ if (Entity.isParamOrTemplateParamKind() && Entity.getDecl()) {
if (Entity.getDecl()->getLocation().isInvalid())
return;
@@ -6608,6 +6654,10 @@ static LifetimeResult getEntityLifetime(
// the call.
return {nullptr, LK_FullExpression};
+ case InitializedEntity::EK_TemplateParameter:
+ // FIXME: This will always be ill-formed; should we eagerly diagnose it here?
+ return {nullptr, LK_FullExpression};
+
case InitializedEntity::EK_Result:
// -- The lifetime of a temporary bound to the returned value in a
// function return statement is not extended; the temporary is
@@ -6690,15 +6740,22 @@ struct IndirectLocalPathEntry {
VarInit,
LValToRVal,
LifetimeBoundCall,
+ TemporaryCopy,
+ LambdaCaptureInit,
GslReferenceInit,
GslPointerInit
} Kind;
Expr *E;
- const Decl *D = nullptr;
+ union {
+ const Decl *D = nullptr;
+ const LambdaCapture *Capture;
+ };
IndirectLocalPathEntry() {}
IndirectLocalPathEntry(EntryKind K, Expr *E) : Kind(K), E(E) {}
IndirectLocalPathEntry(EntryKind K, Expr *E, const Decl *D)
: Kind(K), E(E), D(D) {}
+ IndirectLocalPathEntry(EntryKind K, Expr *E, const LambdaCapture *Capture)
+ : Kind(K), E(E), Capture(Capture) {}
};
using IndirectLocalPath = llvm::SmallVectorImpl<IndirectLocalPathEntry>;
@@ -6892,6 +6949,26 @@ static bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) {
if (ATL.getAttrAs<LifetimeBoundAttr>())
return true;
}
+
+ // Assume that all assignment operators with a "normal" return type return
+ // *this, that is, an lvalue reference that is the same type as the implicit
+ // object parameter (or the LHS for a non-member operator$=).
+ OverloadedOperatorKind OO = FD->getDeclName().getCXXOverloadedOperator();
+ if (OO == OO_Equal || isCompoundAssignmentOperator(OO)) {
+ QualType RetT = FD->getReturnType();
+ if (RetT->isLValueReferenceType()) {
+ ASTContext &Ctx = FD->getASTContext();
+ QualType LHST;
+ auto *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && MD->isCXXInstanceMember())
+ LHST = Ctx.getLValueReferenceType(MD->getThisObjectType());
+ else
+ // Use FD here; MD is null when this is a non-member compound assignment.
+ LHST = FD->getParamDecl(0)->getType();
+ if (Ctx.hasSameType(RetT, LHST))
+ return true;
+ }
+ }
+
return false;
}
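A sketch of the assumption in practice: an assignment operator with the conventional return-*this shape is treated as if lifetimebound:

    struct Str {
      Str &operator=(const Str &) { return *this; }
    };

    void use() {
      Str &r = (Str() = Str()); // 'r' binds to a temporary through the
                                // assumed-lifetimebound operator=; diagnosed
                                // as dangling
    }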
@@ -7237,15 +7314,37 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
// The lifetime of an init-capture is that of the closure object constructed
// by a lambda-expression.
if (auto *LE = dyn_cast<LambdaExpr>(Init)) {
+ LambdaExpr::capture_iterator CapI = LE->capture_begin();
for (Expr *E : LE->capture_inits()) {
+ assert(CapI != LE->capture_end());
+ const LambdaCapture &Cap = *CapI++;
if (!E)
continue;
+ if (Cap.capturesVariable())
+ Path.push_back({IndirectLocalPathEntry::LambdaCaptureInit, E, &Cap});
if (E->isGLValue())
visitLocalsRetainedByReferenceBinding(Path, E, RK_ReferenceBinding,
Visit, EnableLifetimeWarnings);
else
visitLocalsRetainedByInitializer(Path, E, Visit, true,
EnableLifetimeWarnings);
+ if (Cap.capturesVariable())
+ Path.pop_back();
+ }
+ }
+
+ // Assume that a copy or move from a temporary references the same objects
+ // that the temporary does.
+ if (auto *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ if (CCE->getConstructor()->isCopyOrMoveConstructor()) {
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(CCE->getArg(0))) {
+ Expr *Arg = MTE->getSubExpr();
+ Path.push_back({IndirectLocalPathEntry::TemporaryCopy, Arg,
+ CCE->getConstructor()});
+ visitLocalsRetainedByInitializer(Path, Arg, Visit, true,
+ /*EnableLifetimeWarnings*/false);
+ Path.pop_back();
+ }
}
}
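A sketch of the LambdaCaptureInit path; the TemporaryCopy path above is analogous in that a copy or move from a temporary is assumed to refer to whatever the temporary referred to:

    auto make() {
      // The reference init-capture binds to a temporary int that dies at the
      // end of the full-expression; the capture entry lets the diagnostic
      // point at the capture itself.
      return [&r = 10] { return r; };
    }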
@@ -7322,14 +7421,31 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
}
}
+/// Whether a path to an object supports lifetime extension.
+enum PathLifetimeKind {
+ /// Lifetime-extend along this path.
+ Extend,
+ /// We should lifetime-extend, but we don't because (due to technical
+ /// limitations) we can't. This happens for default member initializers,
+ /// which we don't clone for every use, so we don't have a unique
+ /// MaterializeTemporaryExpr to update.
+ ShouldExtend,
+ /// Do not lifetime extend along this path.
+ NoExtend
+};
+
/// Determine whether this is an indirect path to a temporary that we are
-/// supposed to lifetime-extend along (but don't).
-static bool shouldLifetimeExtendThroughPath(const IndirectLocalPath &Path) {
+/// supposed to lifetime-extend along.
+static PathLifetimeKind
+shouldLifetimeExtendThroughPath(const IndirectLocalPath &Path) {
+ PathLifetimeKind Kind = PathLifetimeKind::Extend;
for (auto Elem : Path) {
- if (Elem.Kind != IndirectLocalPathEntry::DefaultInit)
- return false;
+ if (Elem.Kind == IndirectLocalPathEntry::DefaultInit)
+ Kind = PathLifetimeKind::ShouldExtend;
+ else if (Elem.Kind != IndirectLocalPathEntry::LambdaCaptureInit)
+ return PathLifetimeKind::NoExtend;
}
- return true;
+ return Kind;
}
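A sketch of the ShouldExtend case, which exists because default member initializers are not cloned per use, so there is no unique MaterializeTemporaryExpr to mark:

    struct X {
      const int &r = 42; // a temporary bound in a default member initializer
    };
    X x; // warn_unsupported_lifetime_extension: DR1815 says extend, but we
         // cannot yet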
/// Find the range for the first interesting entry in the path at or after I.
@@ -7340,6 +7456,7 @@ static SourceRange nextPathEntryRange(const IndirectLocalPath &Path, unsigned I,
case IndirectLocalPathEntry::AddressOf:
case IndirectLocalPathEntry::LValToRVal:
case IndirectLocalPathEntry::LifetimeBoundCall:
+ case IndirectLocalPathEntry::TemporaryCopy:
case IndirectLocalPathEntry::GslReferenceInit:
case IndirectLocalPathEntry::GslPointerInit:
// These exist primarily to mark the path as not permitting or
@@ -7352,6 +7469,11 @@ static SourceRange nextPathEntryRange(const IndirectLocalPath &Path, unsigned I,
LLVM_FALLTHROUGH;
case IndirectLocalPathEntry::DefaultInit:
return Path[I].E->getSourceRange();
+
+ case IndirectLocalPathEntry::LambdaCaptureInit:
+ if (!Path[I].Capture->capturesVariable())
+ continue;
+ return Path[I].E->getSourceRange();
}
}
return E->getSourceRange();
@@ -7429,17 +7551,16 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
return false;
}
- // Lifetime-extend the temporary.
- if (Path.empty()) {
+ switch (shouldLifetimeExtendThroughPath(Path)) {
+ case PathLifetimeKind::Extend:
// Update the storage duration of the materialized temporary.
// FIXME: Rebuild the expression instead of mutating it.
MTE->setExtendingDecl(ExtendingEntity->getDecl(),
ExtendingEntity->allocateManglingNumber());
// Also visit the temporaries lifetime-extended by this initializer.
return true;
- }
- if (shouldLifetimeExtendThroughPath(Path)) {
+ case PathLifetimeKind::ShouldExtend:
// We're supposed to lifetime-extend the temporary along this path (per
// the resolution of DR1815), but we don't support that yet.
//
@@ -7448,7 +7569,9 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
// lifetime extend its temporaries.
Diag(DiagLoc, diag::warn_unsupported_lifetime_extension)
<< RK << DiagRange;
- } else {
+ break;
+
+ case PathLifetimeKind::NoExtend:
// If the path goes through the initialization of a variable or field,
// it can't possibly reach a temporary created in this full-expression.
// We will have already diagnosed any problems with the initializer.
@@ -7459,6 +7582,7 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
<< RK << !Entity.getParent()
<< ExtendingEntity->getDecl()->isImplicit()
<< ExtendingEntity->getDecl() << Init->isGLValue() << DiagRange;
+ break;
}
break;
}
@@ -7479,7 +7603,8 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
return false;
}
bool IsSubobjectMember = ExtendingEntity != &Entity;
- Diag(DiagLoc, shouldLifetimeExtendThroughPath(Path)
+ Diag(DiagLoc, shouldLifetimeExtendThroughPath(Path) !=
+ PathLifetimeKind::NoExtend
? diag::err_dangling_member
: diag::warn_dangling_member)
<< ExtendingDecl << IsSubobjectMember << RK << DiagRange;
@@ -7586,6 +7711,7 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
break;
case IndirectLocalPathEntry::LifetimeBoundCall:
+ case IndirectLocalPathEntry::TemporaryCopy:
case IndirectLocalPathEntry::GslPointerInit:
case IndirectLocalPathEntry::GslReferenceInit:
// FIXME: Consider adding a note for these.
@@ -7598,7 +7724,7 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
break;
}
- case IndirectLocalPathEntry::VarInit:
+ case IndirectLocalPathEntry::VarInit: {
const VarDecl *VD = cast<VarDecl>(Elem.D);
Diag(VD->getLocation(), diag::note_local_var_initializer)
<< VD->getType()->isReferenceType()
@@ -7606,6 +7732,19 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
<< nextPathEntryRange(Path, I + 1, L);
break;
}
+
+ case IndirectLocalPathEntry::LambdaCaptureInit:
+ if (!Elem.Capture->capturesVariable())
+ break;
+ // FIXME: We can't easily tell apart an init-capture from a nested
+ // capture of an init-capture.
+ const VarDecl *VD = Elem.Capture->getCapturedVar();
+ Diag(Elem.Capture->getLocation(), diag::note_lambda_capture_initializer)
+ << VD << VD->isInitCapture() << Elem.Capture->isExplicit()
+ << (Elem.Capture->getCaptureKind() == LCK_ByRef) << VD
+ << nextPathEntryRange(Path, I + 1, L);
+ break;
+ }
}
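
A hedged sketch of the kind of code the new LambdaCaptureInit entries are meant to describe (assuming Clang's built-in treatment of std::string_view as a gsl::Pointer type):

  #include <string>
  #include <string_view>

  // The capture initializer copies a view of a std::string that is destroyed
  // at the end of this full-expression; the note for the dangling path can
  // now point at the capture initializer itself.
  auto lam = [sv = std::string_view(std::string("temp"))] { return sv; };
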
// We didn't lifetime-extend, so don't go any further; we don't need more
@@ -7879,7 +8018,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
if (S.getLangOpts().CPlusPlus11 && Entity.getType()->isReferenceType() &&
Args.size() == 1 && isa<InitListExpr>(Args[0]) &&
- !Entity.isParameterKind()) {
+ !Entity.isParamOrTemplateParamKind()) {
// Produce a C++98 compatibility warning if we are initializing a reference
// from an initializer list. For parameters, we produce a better warning
// elsewhere.
@@ -7929,6 +8068,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_QualificationConversionLValue:
case SK_QualificationConversionXValue:
case SK_QualificationConversionRValue:
+ case SK_FunctionReferenceConversion:
case SK_AtomicConversion:
case SK_ConversionSequence:
case SK_ConversionSequenceNoNarrowing:
@@ -8023,9 +8163,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
(Step->Kind == SK_CastDerivedToBaseXValue ?
VK_XValue :
VK_RValue);
- CurInit =
- ImplicitCastExpr::Create(S.Context, Step->Type, CK_DerivedToBase,
- CurInit.get(), &BasePath, VK);
+ CurInit = ImplicitCastExpr::Create(S.Context, Step->Type,
+ CK_DerivedToBase, CurInit.get(),
+ &BasePath, VK, FPOptionsOverride());
break;
}
@@ -8060,9 +8200,21 @@ ExprResult InitializationSequence::Perform(Sema &S,
if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType))
return ExprError();
+ QualType MTETy = Step->Type;
+
+ // When the target is an incomplete array type (such as when initializing
+ // an array of unknown bound from an init list), use the source type
+ // instead so that we propagate the array bounds.
+ if (MTETy->isIncompleteArrayType() &&
+ !CurInit.get()->getType()->isIncompleteArrayType() &&
+ S.Context.hasSameType(
+ MTETy->getPointeeOrArrayElementType(),
+ CurInit.get()->getType()->getPointeeOrArrayElementType()))
+ MTETy = CurInit.get()->getType();
+
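A hedged example of the case described in the comment above, assuming C++20's P0388 rules for references to arrays of unknown bound:

  // The materialized temporary is given the deduced type int[3] rather than
  // the incomplete type int[], so the array bounds are not lost.
  int (&&r)[] = {1, 2, 3};
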
// Materialize the temporary into memory.
MaterializeTemporaryExpr *MTE = S.CreateMaterializeTemporaryExpr(
- Step->Type, CurInit.get(), Entity.getType()->isLValueReferenceType());
+ MTETy, CurInit.get(), Entity.getType()->isLValueReferenceType());
CurInit = MTE;
// If we're extending this temporary to automatic storage duration -- we
@@ -8154,9 +8306,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
if (CreatedObject && checkAbstractType(CurInit.get()->getType()))
return ExprError();
- CurInit = ImplicitCastExpr::Create(S.Context, CurInit.get()->getType(),
- CastKind, CurInit.get(), nullptr,
- CurInit.get()->getValueKind());
+ CurInit = ImplicitCastExpr::Create(
+ S.Context, CurInit.get()->getType(), CastKind, CurInit.get(), nullptr,
+ CurInit.get()->getValueKind(), S.CurFPFeatureOverrides());
if (shouldBindAsTemporary(Entity))
// The overall entity is temporary, so this expression should be
@@ -8194,6 +8346,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
break;
}
+ case SK_FunctionReferenceConversion:
+ assert(CurInit.get()->isLValue() &&
+ "function reference should be lvalue");
+ CurInit =
+ S.ImpCastExprToType(CurInit.get(), Step->Type, CK_NoOp, VK_LValue);
+ break;
+
case SK_AtomicConversion: {
assert(CurInit.get()->isRValue() && "cannot convert glvalue to atomic");
CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
@@ -8422,7 +8581,8 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_StringInit: {
QualType Ty = Step->Type;
- CheckStringInit(CurInit.get(), ResultType ? *ResultType : Ty,
+ bool UpdateType = ResultType && Entity.getType()->isIncompleteArrayType();
+ CheckStringInit(CurInit.get(), UpdateType ? *ResultType : Ty,
S.Context.getAsArrayType(Ty), S);
break;
}
@@ -8497,9 +8657,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
break;
case SK_ProduceObjCObject:
- CurInit =
- ImplicitCastExpr::Create(S.Context, Step->Type, CK_ARCProduceObject,
- CurInit.get(), nullptr, VK_RValue);
+ CurInit = ImplicitCastExpr::Create(
+ S.Context, Step->Type, CK_ARCProduceObject, CurInit.get(), nullptr,
+ VK_RValue, FPOptionsOverride());
break;
case SK_StdInitializerList: {
@@ -8553,9 +8713,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
// Case 1b and 1c
// No cast from integer to sampler is needed.
if (!Var->hasGlobalStorage()) {
- CurInit = ImplicitCastExpr::Create(S.Context, Step->Type,
- CK_LValueToRValue, Init,
- /*BasePath=*/nullptr, VK_RValue);
+ CurInit = ImplicitCastExpr::Create(
+ S.Context, Step->Type, CK_LValueToRValue, Init,
+ /*BasePath=*/nullptr, VK_RValue, FPOptionsOverride());
break;
}
// Case 1a
@@ -8705,6 +8865,16 @@ static void emitBadConversionNotes(Sema &S, const InitializedEntity &entity,
if (entity.getKind() == InitializedEntity::EK_Result)
S.EmitRelatedResultTypeNoteForReturn(destType);
}
+ QualType fromType = op->getType();
+ auto *fromDecl = fromType.getTypePtr()->getPointeeCXXRecordDecl();
+ auto *destDecl = destType.getTypePtr()->getPointeeCXXRecordDecl();
+ if (fromDecl && destDecl && fromDecl->getDeclKind() == Decl::CXXRecord &&
+ destDecl->getDeclKind() == Decl::CXXRecord &&
+ !fromDecl->isInvalidDecl() && !destDecl->isInvalidDecl() &&
+ !fromDecl->hasDefinition())
+ S.Diag(fromDecl->getLocation(), diag::note_forward_class_conversion)
+ << S.getASTContext().getTagDeclType(fromDecl)
+ << S.getASTContext().getTagDeclType(destDecl);
}
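
Roughly the situation the new note is for (a hedged sketch; only the shape of the diagnostic matters here):

  struct From;   // incomplete: a definition might reveal that 'From'
  struct To {};  // actually derives from 'To'
  void sink(To *);
  void f(From *p) {
    sink(p);     // error: no known conversion; a note now points at the
  }              // forward declaration of 'From'
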
static void diagnoseListInit(Sema &S, const InitializedEntity &Entity,
@@ -9443,6 +9613,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "qualification conversion (lvalue)";
break;
+ case SK_FunctionReferenceConversion:
+ OS << "function reference conversion";
+ break;
+
case SK_AtomicConversion:
OS << "non-atomic-to-atomic conversion";
break;
@@ -9747,7 +9921,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
auto TemplateName = DeducedTST->getTemplateName();
if (TemplateName.isDependent())
- return Context.DependentTy;
+ return SubstAutoType(TSInfo->getType(), Context.DependentTy);
// We can only perform deduction for class templates.
auto *Template =
@@ -9766,7 +9940,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
Diag(TSInfo->getTypeLoc().getBeginLoc(),
diag::warn_cxx14_compat_class_template_argument_deduction)
<< TSInfo->getTypeLoc().getSourceRange() << 0;
- return Context.DependentTy;
+ return SubstAutoType(TSInfo->getType(), Context.DependentTy);
}
// FIXME: Perform "exact type" matching first, per CWG discussion?
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index 657ed13f207a..af61c82c2002 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -233,7 +233,7 @@ getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) {
/*L angle loc*/ LSI->ExplicitTemplateParamsRange.getBegin(),
LSI->TemplateParams,
/*R angle loc*/LSI->ExplicitTemplateParamsRange.getEnd(),
- nullptr);
+ LSI->RequiresClause.get());
}
return LSI->GLTemplateParameterList;
}
@@ -520,7 +520,8 @@ void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
- SourceLocation RAngleLoc) {
+ SourceLocation RAngleLoc,
+ ExprResult RequiresClause) {
LambdaScopeInfo *LSI = getCurLambda();
assert(LSI && "Expected a lambda scope");
assert(LSI->NumExplicitTemplateParams == 0 &&
@@ -533,6 +534,7 @@ void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
LSI->TemplateParams.append(TParams.begin(), TParams.end());
LSI->NumExplicitTemplateParams = TParams.size();
LSI->ExplicitTemplateParamsRange = {LAngleLoc, RAngleLoc};
+ LSI->RequiresClause = RequiresClause;
}
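
For example (hedged; assumes C++20 concepts support), the stored requires-clause comes from a lambda such as:

  #include <concepts>

  // The requires-clause written after the explicit template parameter list
  // is now kept in LSI->RequiresClause instead of being dropped.
  auto min_int = []<typename T> requires std::integral<T> (T a, T b) {
    return a < b ? a : b;
  };
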
void Sema::addLambdaParameters(
@@ -680,8 +682,9 @@ static void adjustBlockReturnsToEnum(Sema &S, ArrayRef<ReturnStmt*> returns,
ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(retValue);
Expr *E = (cleanups ? cleanups->getSubExpr() : retValue);
- E = ImplicitCastExpr::Create(S.Context, returnType, CK_IntegralCast,
- E, /*base path*/ nullptr, VK_RValue);
+ E = ImplicitCastExpr::Create(S.Context, returnType, CK_IntegralCast, E,
+ /*base path*/ nullptr, VK_RValue,
+ FPOptionsOverride());
if (cleanups) {
cleanups->setSubExpr(E);
} else {
@@ -803,7 +806,8 @@ QualType Sema::buildLambdaInitCaptureInitialization(
Diag(EllipsisLoc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_init_capture_pack
: diag::ext_init_capture_pack);
- DeductType = Context.getPackExpansionType(DeductType, NumExpansions);
+ DeductType = Context.getPackExpansionType(DeductType, NumExpansions,
+ /*ExpectPackInType=*/false);
TLB.push<PackExpansionTypeLoc>(DeductType).setEllipsisLoc(EllipsisLoc);
} else {
// Just ignore the ellipsis for now and form a non-pack variable. We'll
@@ -994,6 +998,10 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (getLangOpts().CUDA)
CUDASetLambdaAttrs(Method);
+ // OpenMP lambdas might get assumption attributes.
+ if (LangOpts.OpenMP)
+ ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
+
// Number the lambda for linkage purposes if necessary.
handleLambdaNumbering(Class, Method);
@@ -1261,30 +1269,89 @@ void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
PopFunctionScopeInfo();
}
+template <typename Func>
+static void repeatForLambdaConversionFunctionCallingConvs(
+ Sema &S, const FunctionProtoType &CallOpProto, Func F) {
+ CallingConv DefaultFree = S.Context.getDefaultCallingConvention(
+ CallOpProto.isVariadic(), /*IsCXXMethod=*/false);
+ CallingConv DefaultMember = S.Context.getDefaultCallingConvention(
+ CallOpProto.isVariadic(), /*IsCXXMethod=*/true);
+ CallingConv CallOpCC = CallOpProto.getCallConv();
+
+ /// Emit a version of the conversion operator for each of the calling
+ /// conventions MSVC is known to generate, as described here:
+ /// https://devblogs.microsoft.com/oldnewthing/20150220-00/?p=44623.
+ /// Experimentally, we determined that MSVC generates cdecl, stdcall,
+ /// fastcall, and vectorcall versions when they are supported by the target.
+ /// Additionally, we ensure that the default-free, default-member, and
+ /// call-operator calling conventions are generated as well.
+ /// NOTE: We intentionally generate a 'thiscall' on Win32 implicitly from the
+ /// 'member default', despite MSVC not doing so. We do this to ensure that
+ /// someone who intentionally places 'thiscall' on the lambda call operator
+ /// will still get that overload, since we don't have a way of detecting the
+ /// attribute by the time we get here.
+ if (S.getLangOpts().MSVCCompat) {
+ CallingConv Convs[] = {
+ CC_C, CC_X86StdCall, CC_X86FastCall, CC_X86VectorCall,
+ DefaultFree, DefaultMember, CallOpCC};
+ llvm::sort(Convs);
+ llvm::iterator_range<CallingConv *> Range(
+ std::begin(Convs), std::unique(std::begin(Convs), std::end(Convs)));
+ const TargetInfo &TI = S.getASTContext().getTargetInfo();
+
+ for (CallingConv C : Range) {
+ if (TI.checkCallingConvention(C) == TargetInfo::CCCR_OK)
+ F(C);
+ }
+ return;
+ }
+
+ if (CallOpCC == DefaultMember && DefaultMember != DefaultFree) {
+ F(DefaultFree);
+ F(DefaultMember);
+ } else {
+ F(CallOpCC);
+ }
+}
+
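Concretely, on a 32-bit Windows target in MSVC-compatible mode the loop above is intended to make all of the following initializations succeed (a hedged sketch, not a test from the patch):

  auto l = [](int n) { return n + 1; };
  int(__cdecl *p1)(int) = l;
  int(__stdcall *p2)(int) = l;
  int(__fastcall *p3)(int) = l;
  int(__vectorcall *p4)(int) = l;
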
+// Returns the 'standard' calling convention to be used for the lambda
+// conversion function, that is, the 'free' function calling convention unless
+// it is overridden by a non-default calling convention attribute.
+static CallingConv
+getLambdaConversionFunctionCallConv(Sema &S,
+ const FunctionProtoType *CallOpProto) {
+ CallingConv DefaultFree = S.Context.getDefaultCallingConvention(
+ CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
+ CallingConv DefaultMember = S.Context.getDefaultCallingConvention(
+ CallOpProto->isVariadic(), /*IsCXXMethod=*/true);
+ CallingConv CallOpCC = CallOpProto->getCallConv();
+
+ // If the call operator's convention was left as the default member
+ // convention, use the default free-function convention instead.
+ if (CallOpCC == DefaultMember && DefaultMember != DefaultFree)
+ return DefaultFree;
+ return CallOpCC;
+}
+
QualType Sema::getLambdaConversionFunctionResultType(
- const FunctionProtoType *CallOpProto) {
- // The function type inside the pointer type is the same as the call
- // operator with some tweaks. The calling convention is the default free
- // function convention, and the type qualifications are lost.
+ const FunctionProtoType *CallOpProto, CallingConv CC) {
const FunctionProtoType::ExtProtoInfo CallOpExtInfo =
CallOpProto->getExtProtoInfo();
FunctionProtoType::ExtProtoInfo InvokerExtInfo = CallOpExtInfo;
- CallingConv CC = Context.getDefaultCallingConvention(
- CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC);
InvokerExtInfo.TypeQuals = Qualifiers();
assert(InvokerExtInfo.RefQualifier == RQ_None &&
- "Lambda's call operator should not have a reference qualifier");
+ "Lambda's call operator should not have a reference qualifier");
return Context.getFunctionType(CallOpProto->getReturnType(),
CallOpProto->getParamTypes(), InvokerExtInfo);
}
/// Add a lambda's conversion to function pointer, as described in
/// C++11 [expr.prim.lambda]p6.
-static void addFunctionPointerConversion(Sema &S,
- SourceRange IntroducerRange,
+static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
CXXRecordDecl *Class,
- CXXMethodDecl *CallOperator) {
+ CXXMethodDecl *CallOperator,
+ QualType InvokerFunctionTy) {
// This conversion is explicitly disabled if the lambda's function has
// pass_object_size attributes on any of its parameters.
auto HasPassObjectSizeAttr = [](const ParmVarDecl *P) {
@@ -1294,8 +1361,6 @@ static void addFunctionPointerConversion(Sema &S,
return;
// Add the conversion to function pointer.
- QualType InvokerFunctionTy = S.getLambdaConversionFunctionResultType(
- CallOperator->getType()->castAs<FunctionProtoType>());
QualType PtrToFunctionTy = S.Context.getPointerType(InvokerFunctionTy);
// Create the type of the conversion function.
@@ -1380,7 +1445,8 @@ static void addFunctionPointerConversion(Sema &S,
S.Context, Class, Loc,
DeclarationNameInfo(ConversionName, Loc, ConvNameLoc), ConvTy, ConvTSI,
/*isInline=*/true, ExplicitSpecifier(),
- S.getLangOpts().CPlusPlus17 ? CSK_constexpr : CSK_unspecified,
+ S.getLangOpts().CPlusPlus17 ? ConstexprSpecKind::Constexpr
+ : ConstexprSpecKind::Unspecified,
CallOperator->getBody()->getEndLoc());
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
@@ -1419,7 +1485,8 @@ static void addFunctionPointerConversion(Sema &S,
CXXMethodDecl *Invoke = CXXMethodDecl::Create(
S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc),
InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static,
- /*isInline=*/true, CSK_unspecified, CallOperator->getBody()->getEndLoc());
+ /*isInline=*/true, ConstexprSpecKind::Unspecified,
+ CallOperator->getBody()->getEndLoc());
for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
InvokerParams[I]->setOwningFunction(Invoke);
Invoke->setParams(InvokerParams);
@@ -1440,13 +1507,34 @@ static void addFunctionPointerConversion(Sema &S,
Class->addDecl(Invoke);
}
+/// Add a lambda's conversion to function pointers, as described in
+/// C++11 [expr.prim.lambda]p6. Note that in most cases, this should emit only a
+/// single pointer conversion. In the event that the default calling convention
+/// for free and member functions is different, it will emit both conventions.
+static void addFunctionPointerConversions(Sema &S, SourceRange IntroducerRange,
+ CXXRecordDecl *Class,
+ CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *CallOpProto =
+ CallOperator->getType()->castAs<FunctionProtoType>();
+
+ repeatForLambdaConversionFunctionCallingConvs(
+ S, *CallOpProto, [&](CallingConv CC) {
+ QualType InvokerFunctionTy =
+ S.getLambdaConversionFunctionResultType(CallOpProto, CC);
+ addFunctionPointerConversion(S, IntroducerRange, Class, CallOperator,
+ InvokerFunctionTy);
+ });
+}
+
/// Add a lambda's conversion to block pointer.
static void addBlockPointerConversion(Sema &S,
SourceRange IntroducerRange,
CXXRecordDecl *Class,
CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *CallOpProto =
+ CallOperator->getType()->castAs<FunctionProtoType>();
QualType FunctionTy = S.getLambdaConversionFunctionResultType(
- CallOperator->getType()->castAs<FunctionProtoType>());
+ CallOpProto, getLambdaConversionFunctionCallConv(S, CallOpProto));
QualType BlockPtrTy = S.Context.getBlockPointerType(FunctionTy);
FunctionProtoType::ExtProtoInfo ConversionEPI(
@@ -1465,7 +1553,7 @@ static void addBlockPointerConversion(Sema &S,
CXXConversionDecl *Conversion = CXXConversionDecl::Create(
S.Context, Class, Loc, DeclarationNameInfo(Name, Loc, NameLoc), ConvTy,
S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
- /*isInline=*/true, ExplicitSpecifier(), CSK_unspecified,
+ /*isInline=*/true, ExplicitSpecifier(), ConstexprSpecKind::Unspecified,
CallOperator->getBody()->getEndLoc());
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
@@ -1623,8 +1711,9 @@ FieldDecl *Sema::BuildCaptureField(RecordDecl *RD,
// Build the non-static data member.
FieldDecl *Field =
- FieldDecl::Create(Context, RD, Loc, Loc, nullptr, FieldType, TSI, nullptr,
- false, ICIS_NoInit);
+ FieldDecl::Create(Context, RD, /*StartLoc=*/Loc, /*IdLoc=*/Loc,
+ /*Id=*/nullptr, FieldType, TSI, /*BW=*/nullptr,
+ /*Mutable=*/false, ICIS_NoInit);
// If the variable being captured has an invalid type, mark the class as
// invalid as well.
if (!FieldType->isDependentType()) {
@@ -1784,7 +1873,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
CUDACheckLambdaCapture(CallOperator, From);
}
- Class->setCaptures(Captures);
+ Class->setCaptures(Context, Captures);
// C++11 [expr.prim.lambda]p6:
// The closure type for a lambda-expression with no lambda-capture
@@ -1792,8 +1881,8 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// to pointer to function having the same parameter and return
// types as the closure type's function call operator.
if (Captures.empty() && CaptureDefault == LCD_None)
- addFunctionPointerConversion(*this, IntroducerRange, Class,
- CallOperator);
+ addFunctionPointerConversions(*this, IntroducerRange, Class,
+ CallOperator);
// Objective-C++:
// The closure type for a lambda-expression has a public non-virtual
@@ -1828,8 +1917,8 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
CallOperator->setConstexprKind(
CheckConstexprFunctionDefinition(CallOperator,
CheckConstexprKind::CheckValid)
- ? CSK_constexpr
- : CSK_unspecified);
+ ? ConstexprSpecKind::Constexpr
+ : ConstexprSpecKind::Unspecified);
}
// Emit delayed shadowing warnings now that the full capture list is known.
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 5757eaf3fac0..29038ab9fe1c 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -894,10 +894,9 @@ bool Sema::LookupBuiltin(LookupResult &R) {
Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return false;
- if (NamedDecl *D = LazilyCreateBuiltin((IdentifierInfo *)II,
- BuiltinID, TUScope,
- R.isForRedeclaration(),
- R.getNameLoc())) {
+ if (NamedDecl *D =
+ LazilyCreateBuiltin(II, BuiltinID, TUScope,
+ R.isForRedeclaration(), R.getNameLoc())) {
R.addDecl(D);
return true;
}
@@ -908,6 +907,24 @@ bool Sema::LookupBuiltin(LookupResult &R) {
return false;
}
+/// Looks up the declaration of "struct objc_super" and saves it for later
+/// use when building the builtin declarations of objc_msgSendSuper and
+/// objc_msgSendSuper_stret.
+static void LookupPredefedObjCSuperType(Sema &Sema, Scope *S) {
+ ASTContext &Context = Sema.Context;
+ LookupResult Result(Sema, &Context.Idents.get("objc_super"), SourceLocation(),
+ Sema::LookupTagName);
+ Sema.LookupName(Result, S);
+ if (Result.getResultKind() == LookupResult::Found)
+ if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
+ Context.setObjCSuperType(Context.getTagDeclType(TD));
+}
+
+void Sema::LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID) {
+ if (ID == Builtin::BIobjc_msgSendSuper)
+ LookupPredefedObjCSuperType(*this, S);
+}
+
/// Determine whether we can declare a special member function within
/// the class at this point.
static bool CanDeclareSpecialMemberFunction(const CXXRecordDecl *Class) {
@@ -2068,47 +2085,6 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
return Found;
}
-/// Callback that looks for any member of a class with the given name.
-static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path, DeclarationName Name) {
- RecordDecl *BaseRecord = Specifier->getType()->castAs<RecordType>()->getDecl();
-
- Path.Decls = BaseRecord->lookup(Name);
- return !Path.Decls.empty();
-}
-
-/// Determine whether the given set of member declarations contains only
-/// static members, nested types, and enumerators.
-template<typename InputIterator>
-static bool HasOnlyStaticMembers(InputIterator First, InputIterator Last) {
- Decl *D = (*First)->getUnderlyingDecl();
- if (isa<VarDecl>(D) || isa<TypeDecl>(D) || isa<EnumConstantDecl>(D))
- return true;
-
- if (isa<CXXMethodDecl>(D)) {
- // Determine whether all of the methods are static.
- bool AllMethodsAreStatic = true;
- for(; First != Last; ++First) {
- D = (*First)->getUnderlyingDecl();
-
- if (!isa<CXXMethodDecl>(D)) {
- assert(isa<TagDecl>(D) && "Non-function must be a tag decl");
- break;
- }
-
- if (!cast<CXXMethodDecl>(D)->isStatic()) {
- AllMethodsAreStatic = false;
- break;
- }
- }
-
- if (AllMethodsAreStatic)
- return true;
- }
-
- return false;
-}
-
/// Perform qualified name lookup into a given context.
///
/// Qualified name lookup (C++ [basic.lookup.qual]) is used to find
@@ -2186,6 +2162,13 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
if (!LookupRec || !LookupRec->getDefinition())
return false;
+ // We're done for lookups that can never succeed for C++ classes.
+ if (R.getLookupKind() == LookupOperatorName ||
+ R.getLookupKind() == LookupNamespaceName ||
+ R.getLookupKind() == LookupObjCProtocolName ||
+ R.getLookupKind() == LookupLabel)
+ return false;
+
// If we're performing qualified name lookup into a dependent class,
// then we are actually looking into a current instantiation. If we have any
// dependent base classes, then we either have to delay lookup until
@@ -2198,59 +2181,27 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
}
// Perform lookup into our base classes.
- CXXBasePaths Paths;
- Paths.setOrigin(LookupRec);
-
- // Look for this member in our base classes
- bool (*BaseCallback)(const CXXBaseSpecifier *Specifier, CXXBasePath &Path,
- DeclarationName Name) = nullptr;
- switch (R.getLookupKind()) {
- case LookupObjCImplicitSelfParam:
- case LookupOrdinaryName:
- case LookupMemberName:
- case LookupRedeclarationWithLinkage:
- case LookupLocalFriendName:
- case LookupDestructorName:
- BaseCallback = &CXXRecordDecl::FindOrdinaryMember;
- break;
-
- case LookupTagName:
- BaseCallback = &CXXRecordDecl::FindTagMember;
- break;
-
- case LookupAnyName:
- BaseCallback = &LookupAnyMember;
- break;
-
- case LookupOMPReductionName:
- BaseCallback = &CXXRecordDecl::FindOMPReductionMember;
- break;
-
- case LookupOMPMapperName:
- BaseCallback = &CXXRecordDecl::FindOMPMapperMember;
- break;
-
- case LookupUsingDeclName:
- // This lookup is for redeclarations only.
-
- case LookupOperatorName:
- case LookupNamespaceName:
- case LookupObjCProtocolName:
- case LookupLabel:
- // These lookups will never find a member in a C++ class (or base class).
- return false;
-
- case LookupNestedNameSpecifierName:
- BaseCallback = &CXXRecordDecl::FindNestedNameSpecifierMember;
- break;
- }
DeclarationName Name = R.getLookupName();
- if (!LookupRec->lookupInBases(
- [=](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- return BaseCallback(Specifier, Path, Name);
- },
- Paths))
+ unsigned IDNS = R.getIdentifierNamespace();
+
+ // Look for this member in our base classes.
+ auto BaseCallback = [Name, IDNS](const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path) -> bool {
+ CXXRecordDecl *BaseRecord = Specifier->getType()->getAsCXXRecordDecl();
+ // Drop leading non-matching lookup results from the declaration list so
+ // we don't need to consider them again below.
+ for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ if (Path.Decls.front()->isInIdentifierNamespace(IDNS))
+ return true;
+ }
+ return false;
+ };
+
+ CXXBasePaths Paths;
+ Paths.setOrigin(LookupRec);
+ if (!LookupRec->lookupInBases(BaseCallback, Paths))
return false;
R.setNamingClass(LookupRec);
@@ -2265,6 +2216,85 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
int SubobjectNumber = 0;
AccessSpecifier SubobjectAccess = AS_none;
+ // Check whether the given lookup result contains only static members.
+ auto HasOnlyStaticMembers = [&](DeclContextLookupResult Result) {
+ for (NamedDecl *ND : Result)
+ if (ND->isInIdentifierNamespace(IDNS) && ND->isCXXInstanceMember())
+ return false;
+ return true;
+ };
+
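The rule the lambda implements, in C++ terms ([class.member.lookup]; a hedged sketch):

  struct A { static int n; };
  int A::n = 0;
  struct B : A {};
  struct C : A {};
  struct D : B, C {};
  int k = D::n;  // OK: every 'n' found is a static member, so the two A
                 // subobjects do not make the lookup ambiguous
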
+ bool TemplateNameLookup = R.isTemplateNameLookup();
+
+ // Determine whether two sets of members contain the same members, as
+ // required by C++ [class.member.lookup]p6.
+ auto HasSameDeclarations = [&](DeclContextLookupResult A,
+ DeclContextLookupResult B) {
+ using Iterator = DeclContextLookupResult::iterator;
+ using Result = const void *;
+
+ auto Next = [&](Iterator &It, Iterator End) -> Result {
+ while (It != End) {
+ NamedDecl *ND = *It++;
+ if (!ND->isInIdentifierNamespace(IDNS))
+ continue;
+
+ // C++ [temp.local]p3:
+ // A lookup that finds an injected-class-name (10.2) can result in
+ // an ambiguity in certain cases (for example, if it is found in
+ // more than one base class). If all of the injected-class-names
+ // that are found refer to specializations of the same class
+ // template, and if the name is used as a template-name, the
+ // reference refers to the class template itself and not a
+ // specialization thereof, and is not ambiguous.
+ if (TemplateNameLookup)
+ if (auto *TD = getAsTemplateNameDecl(ND))
+ ND = TD;
+
+ // C++ [class.member.lookup]p3:
+ // type declarations (including injected-class-names) are replaced by
+ // the types they designate
+ if (const TypeDecl *TD = dyn_cast<TypeDecl>(ND->getUnderlyingDecl())) {
+ QualType T = Context.getTypeDeclType(TD);
+ return T.getCanonicalType().getAsOpaquePtr();
+ }
+
+ return ND->getUnderlyingDecl()->getCanonicalDecl();
+ }
+ return nullptr;
+ };
+
+ // We'll often find the declarations are in the same order. Handle this
+ // case (and the special case of only one declaration) efficiently.
+ Iterator AIt = A.begin(), BIt = B.begin(), AEnd = A.end(), BEnd = B.end();
+ while (true) {
+ Result AResult = Next(AIt, AEnd);
+ Result BResult = Next(BIt, BEnd);
+ if (!AResult && !BResult)
+ return true;
+ if (!AResult || !BResult)
+ return false;
+ if (AResult != BResult) {
+ // Found a mismatch; carefully check both lists, accounting for the
+ // possibility of declarations appearing more than once.
+ llvm::SmallDenseMap<Result, bool, 32> AResults;
+ for (; AResult; AResult = Next(AIt, AEnd))
+ AResults.insert({AResult, /*FoundInB*/false});
+ unsigned Found = 0;
+ for (; BResult; BResult = Next(BIt, BEnd)) {
+ auto It = AResults.find(BResult);
+ if (It == AResults.end())
+ return false;
+ if (!It->second) {
+ It->second = true;
+ ++Found;
+ }
+ }
+ return AResults.size() == Found;
+ }
+ }
+ };
+
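And a hedged sketch of the [class.member.lookup]p6 rule that HasSameDeclarations implements: sets found in different subobject types are not ambiguous when they designate the same entity.

  struct B { using type = int; };
  struct C { using type = int; };
  struct D : B, C {};
  D::type x = 0;  // OK: B::type and C::type are distinct declarations, but
                  // both designate 'int', so the lookup is unambiguous
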
for (CXXBasePaths::paths_iterator Path = Paths.begin(), PathEnd = Paths.end();
Path != PathEnd; ++Path) {
const CXXBasePathElement &PathElement = Path->back();
@@ -2281,51 +2311,25 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
continue;
}
- if (SubobjectType
- != Context.getCanonicalType(PathElement.Base->getType())) {
+ if (SubobjectType !=
+ Context.getCanonicalType(PathElement.Base->getType())) {
// We found members of the given name in two subobjects of
// different types. If the declaration sets aren't the same, this
// lookup is ambiguous.
- if (HasOnlyStaticMembers(Path->Decls.begin(), Path->Decls.end())) {
- CXXBasePaths::paths_iterator FirstPath = Paths.begin();
- DeclContext::lookup_iterator FirstD = FirstPath->Decls.begin();
- DeclContext::lookup_iterator CurrentD = Path->Decls.begin();
-
- // Get the decl that we should use for deduplicating this lookup.
- auto GetRepresentativeDecl = [&](NamedDecl *D) -> Decl * {
- // C++ [temp.local]p3:
- // A lookup that finds an injected-class-name (10.2) can result in
- // an ambiguity in certain cases (for example, if it is found in
- // more than one base class). If all of the injected-class-names
- // that are found refer to specializations of the same class
- // template, and if the name is used as a template-name, the
- // reference refers to the class template itself and not a
- // specialization thereof, and is not ambiguous.
- if (R.isTemplateNameLookup())
- if (auto *TD = getAsTemplateNameDecl(D))
- D = TD;
- return D->getUnderlyingDecl()->getCanonicalDecl();
- };
-
- while (FirstD != FirstPath->Decls.end() &&
- CurrentD != Path->Decls.end()) {
- if (GetRepresentativeDecl(*FirstD) !=
- GetRepresentativeDecl(*CurrentD))
- break;
-
- ++FirstD;
- ++CurrentD;
- }
-
- if (FirstD == FirstPath->Decls.end() &&
- CurrentD == Path->Decls.end())
- continue;
- }
+ //
+ // FIXME: The language rule says that this applies irrespective of
+ // whether the sets contain only static members.
+ if (HasOnlyStaticMembers(Path->Decls) &&
+ HasSameDeclarations(Paths.begin()->Decls, Path->Decls))
+ continue;
R.setAmbiguousBaseSubobjectTypes(Paths);
return true;
}
+ // FIXME: This language rule no longer exists. Checking for ambiguous base
+ // subobjects should be done as part of formation of a class member access
+ // expression (when converting the object parameter to the member's type).
if (SubobjectNumber != PathElement.SubobjectNumber) {
// We have a different subobject of the same type.
@@ -2333,7 +2337,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// A static member, a nested type or an enumerator defined in
// a base class T can unambiguously be found even if an object
// has more than one base class subobject of type T.
- if (HasOnlyStaticMembers(Path->Decls.begin(), Path->Decls.end()))
+ if (HasOnlyStaticMembers(Path->Decls))
continue;
// We have found a nonstatic member name in multiple, distinct
@@ -2348,7 +2352,8 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
for (auto *D : Paths.front().Decls) {
AccessSpecifier AS = CXXRecordDecl::MergeAccess(SubobjectAccess,
D->getAccess());
- R.addDecl(D, AS);
+ if (NamedDecl *ND = R.getAcceptableDecl(D))
+ R.addDecl(ND, AS);
}
R.resolveKind();
return true;
@@ -2504,13 +2509,23 @@ void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
<< Name << LookupRange;
CXXBasePaths *Paths = Result.getBasePaths();
- std::set<Decl *> DeclsPrinted;
+ std::set<const NamedDecl *> DeclsPrinted;
for (CXXBasePaths::paths_iterator Path = Paths->begin(),
PathEnd = Paths->end();
Path != PathEnd; ++Path) {
- Decl *D = Path->Decls.front();
- if (DeclsPrinted.insert(D).second)
- Diag(D->getLocation(), diag::note_ambiguous_member_found);
+ const NamedDecl *D = Path->Decls.front();
+ if (!D->isInIdentifierNamespace(Result.getIdentifierNamespace()))
+ continue;
+ if (DeclsPrinted.insert(D).second) {
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D->getUnderlyingDecl()))
+ Diag(D->getLocation(), diag::note_ambiguous_member_type_found)
+ << TD->getUnderlyingType();
+ else if (const auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
+ Diag(D->getLocation(), diag::note_ambiguous_member_type_found)
+ << Context.getTypeDeclType(TD);
+ else
+ Diag(D->getLocation(), diag::note_ambiguous_member_found);
+ }
}
break;
}
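
A hedged sketch of an ambiguity where the new type-aware notes help:

  struct B1 { using type = int; };
  struct B2 { using type = long; };
  struct D : B1, B2 {};
  D::type x = 0;  // error: ambiguous; the notes now print the designated
                  // types ('int' vs 'long') instead of only the locations
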
@@ -2981,7 +2996,6 @@ ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
}
void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
- QualType T1, QualType T2,
UnresolvedSetImpl &Functions) {
// C++ [over.match.oper]p3:
// -- The set of non-member candidates is the result of the
@@ -3319,9 +3333,9 @@ CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
/// and filter the results to the appropriate set for the given argument types.
Sema::LiteralOperatorLookupResult
Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
- ArrayRef<QualType> ArgTys,
- bool AllowRaw, bool AllowTemplate,
- bool AllowStringTemplate, bool DiagnoseMissing) {
+ ArrayRef<QualType> ArgTys, bool AllowRaw,
+ bool AllowTemplate, bool AllowStringTemplatePack,
+ bool DiagnoseMissing, StringLiteral *StringLit) {
LookupName(R, S);
assert(R.getResultKind() != LookupResult::Ambiguous &&
"literal operator lookup can't be ambiguous");
@@ -3329,10 +3343,11 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
// Filter the lookup results appropriately.
LookupResult::Filter F = R.makeFilter();
+ bool AllowCooked = true;
bool FoundRaw = false;
bool FoundTemplate = false;
- bool FoundStringTemplate = false;
- bool FoundExactMatch = false;
+ bool FoundStringTemplatePack = false;
+ bool FoundCooked = false;
while (F.hasNext()) {
Decl *D = F.next();
@@ -3347,19 +3362,19 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
bool IsRaw = false;
bool IsTemplate = false;
- bool IsStringTemplate = false;
- bool IsExactMatch = false;
+ bool IsStringTemplatePack = false;
+ bool IsCooked = false;
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getNumParams() == 1 &&
FD->getParamDecl(0)->getType()->getAs<PointerType>())
IsRaw = true;
else if (FD->getNumParams() == ArgTys.size()) {
- IsExactMatch = true;
+ IsCooked = true;
for (unsigned ArgIdx = 0; ArgIdx != ArgTys.size(); ++ArgIdx) {
QualType ParamTy = FD->getParamDecl(ArgIdx)->getType();
if (!Context.hasSameUnqualifiedType(ArgTys[ArgIdx], ParamTy)) {
- IsExactMatch = false;
+ IsCooked = false;
break;
}
}
@@ -3367,29 +3382,59 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
}
if (FunctionTemplateDecl *FD = dyn_cast<FunctionTemplateDecl>(D)) {
TemplateParameterList *Params = FD->getTemplateParameters();
- if (Params->size() == 1)
+ if (Params->size() == 1) {
IsTemplate = true;
- else
- IsStringTemplate = true;
+ if (!Params->getParam(0)->isTemplateParameterPack() && !StringLit) {
+ // Implied but not stated: user-defined integer and floating literals
+ // only ever use numeric literal operator templates, not templates
+ // taking a parameter of class type.
+ F.erase();
+ continue;
+ }
+
+ // A string literal operator template is only considered if the string
+ // literal is a well-formed template argument for the template parameter.
+ if (StringLit) {
+ SFINAETrap Trap(*this);
+ SmallVector<TemplateArgument, 1> Checked;
+ TemplateArgumentLoc Arg(TemplateArgument(StringLit), StringLit);
+ if (CheckTemplateArgument(Params->getParam(0), Arg, FD,
+ R.getNameLoc(), R.getNameLoc(), 0,
+ Checked) ||
+ Trap.hasErrorOccurred())
+ IsTemplate = false;
+ }
+ } else {
+ IsStringTemplatePack = true;
+ }
}
- if (IsExactMatch) {
- FoundExactMatch = true;
+ if (AllowTemplate && StringLit && IsTemplate) {
+ FoundTemplate = true;
AllowRaw = false;
- AllowTemplate = false;
- AllowStringTemplate = false;
- if (FoundRaw || FoundTemplate || FoundStringTemplate) {
+ AllowCooked = false;
+ AllowStringTemplatePack = false;
+ if (FoundRaw || FoundCooked || FoundStringTemplatePack) {
+ F.restart();
+ FoundRaw = FoundCooked = FoundStringTemplatePack = false;
+ }
+ } else if (AllowCooked && IsCooked) {
+ FoundCooked = true;
+ AllowRaw = false;
+ AllowTemplate = StringLit;
+ AllowStringTemplatePack = false;
+ if (FoundRaw || FoundTemplate || FoundStringTemplatePack) {
// Go through again and remove the raw and template decls we've
// already found.
F.restart();
- FoundRaw = FoundTemplate = FoundStringTemplate = false;
+ FoundRaw = FoundTemplate = FoundStringTemplatePack = false;
}
} else if (AllowRaw && IsRaw) {
FoundRaw = true;
} else if (AllowTemplate && IsTemplate) {
FoundTemplate = true;
- } else if (AllowStringTemplate && IsStringTemplate) {
- FoundStringTemplate = true;
+ } else if (AllowStringTemplatePack && IsStringTemplatePack) {
+ FoundStringTemplatePack = true;
} else {
F.erase();
}
@@ -3397,10 +3442,15 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
F.done();
+ // Per C++20 [lex.ext]p5, we prefer the template form over the non-template
+ // form for string literal operator templates.
+ if (StringLit && FoundTemplate)
+ return LOLR_Template;
+
// C++11 [lex.ext]p3, p4: If S contains a literal operator with a matching
// parameter type, that is used in preference to a raw literal operator
// or literal operator template.
- if (FoundExactMatch)
+ if (FoundCooked)
return LOLR_Cooked;
// C++11 [lex.ext]p3, p4: S shall contain a raw literal operator or a literal
@@ -3418,15 +3468,15 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
if (FoundTemplate)
return LOLR_Template;
- if (FoundStringTemplate)
- return LOLR_StringTemplate;
+ if (FoundStringTemplatePack)
+ return LOLR_StringTemplatePack;
// Didn't find anything we could use.
if (DiagnoseMissing) {
Diag(R.getNameLoc(), diag::err_ovl_no_viable_literal_operator)
<< R.getLookupName() << (int)ArgTys.size() << ArgTys[0]
<< (ArgTys.size() == 2 ? ArgTys[1] : QualType()) << AllowRaw
- << (AllowTemplate || AllowStringTemplate);
+ << (AllowTemplate || AllowStringTemplatePack);
return LOLR_Error;
}
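
A hedged C++20 sketch of the string-literal operator template form this code now recognizes (fixed_string is a stand-in structural class type, not something from the patch):

  #include <cstddef>

  template <std::size_t N> struct fixed_string {
    char data[N]{};
    constexpr fixed_string(const char (&s)[N]) {
      for (std::size_t i = 0; i != N; ++i)
        data[i] = s[i];
    }
  };

  // A literal operator template with a class-type non-type template
  // parameter: "hi"_fs resolves as operator""_fs<"hi">(), the literal being
  // converted to fixed_string<3>; per [lex.ext]p5 this template form is
  // preferred over a cooked 'const char *' overload.
  template <fixed_string S> constexpr auto operator""_fs() { return S; }

  constexpr auto s = "hi"_fs;
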
diff --git a/clang/lib/Sema/SemaModule.cpp b/clang/lib/Sema/SemaModule.cpp
index 10de0ca91221..af95b1a93cc4 100644
--- a/clang/lib/Sema/SemaModule.cpp
+++ b/clang/lib/Sema/SemaModule.cpp
@@ -187,7 +187,7 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
Diag(Path[0].second, diag::err_module_redefinition) << ModuleName;
if (M->DefinitionLoc.isValid())
Diag(M->DefinitionLoc, diag::note_prev_module_definition);
- else if (const auto *FE = M->getASTFile())
+ else if (Optional<FileEntryRef> FE = M->getASTFile())
Diag(M->DefinitionLoc, diag::note_prev_module_definition_from_ast_file)
<< FE->getName();
Mod = M;
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index e301c62dd2c0..fdc30fe6f657 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -1464,10 +1464,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
DeclRefExpr(Context, SelfDecl, false, SelfDecl->getType(), VK_LValue,
PropertyDiagLoc);
MarkDeclRefReferenced(SelfExpr);
- Expr *LoadSelfExpr =
- ImplicitCastExpr::Create(Context, SelfDecl->getType(),
- CK_LValueToRValue, SelfExpr, nullptr,
- VK_RValue);
+ Expr *LoadSelfExpr = ImplicitCastExpr::Create(
+ Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr,
+ VK_RValue, FPOptionsOverride());
Expr *IvarRefExpr =
new (Context) ObjCIvarRefExpr(Ivar,
Ivar->getUsageType(SelfDecl->getType()),
@@ -1528,10 +1527,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
DeclRefExpr(Context, SelfDecl, false, SelfDecl->getType(), VK_LValue,
PropertyDiagLoc);
MarkDeclRefReferenced(SelfExpr);
- Expr *LoadSelfExpr =
- ImplicitCastExpr::Create(Context, SelfDecl->getType(),
- CK_LValueToRValue, SelfExpr, nullptr,
- VK_RValue);
+ Expr *LoadSelfExpr = ImplicitCastExpr::Create(
+ Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr,
+ VK_RValue, FPOptionsOverride());
Expr *lhs =
new (Context) ObjCIvarRefExpr(Ivar,
Ivar->getUsageType(SelfDecl->getType()),
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 920463da4027..78707484f588 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -35,6 +35,7 @@
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <set>
@@ -48,7 +49,7 @@ using namespace llvm::omp;
static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
- OpenMPClauseKind CKind, bool NoDiagnose);
+ OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose);
namespace {
/// Default data sharing attributes, which can be applied to directive.
@@ -70,12 +71,15 @@ public:
const Expr *RefExpr = nullptr;
DeclRefExpr *PrivateCopy = nullptr;
SourceLocation ImplicitDSALoc;
+ bool AppliedToPointee = false;
DSAVarData() = default;
DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
const Expr *RefExpr, DeclRefExpr *PrivateCopy,
- SourceLocation ImplicitDSALoc, unsigned Modifier)
+ SourceLocation ImplicitDSALoc, unsigned Modifier,
+ bool AppliedToPointee)
: DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
- PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
+ PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc),
+ AppliedToPointee(AppliedToPointee) {}
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
@@ -99,6 +103,9 @@ private:
/// variable is marked as lastprivate(true) or not (false).
llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
DeclRefExpr *PrivateCopy = nullptr;
+ /// true if the attribute is applied to the pointee, not the variable
+ /// itself.
+ bool AppliedToPointee = false;
};
using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
using UsedRefMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
@@ -153,6 +160,7 @@ private:
OpenMPDirectiveKind Directive = OMPD_unknown;
DeclarationNameInfo DirectiveName;
Scope *CurScope = nullptr;
+ DeclContext *Context = nullptr;
SourceLocation ConstructLoc;
/// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
/// get the data (loop counters etc.) about enclosing loop-based construct.
@@ -184,6 +192,7 @@ private:
llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
UsesAllocatorsDecls;
+ Expr *DeclareMapperVar = nullptr;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
@@ -510,7 +519,8 @@ public:
/// Adds explicit data sharing attribute to the specified declaration.
void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
- DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0);
+ DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0,
+ bool AppliedToPointee = false);
/// Adds additional information for the reduction items with the reduction id
/// represented as an operator.
@@ -562,7 +572,8 @@ public:
/// match specified \a CPred predicate in any directive which matches \a DPred
/// predicate.
const DSAVarData
- hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ hasDSA(ValueDecl *D,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variables has data-sharing attributes which
@@ -570,15 +581,16 @@ public:
/// matches \a DPred predicate.
const DSAVarData
hasInnermostDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variables has explicit data-sharing
/// attributes which match specified \a CPred predicate at the specified
/// OpenMP region.
- bool hasExplicitDSA(const ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
- unsigned Level, bool NotLastprivate = false) const;
+ bool
+ hasExplicitDSA(const ValueDecl *D,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
+ unsigned Level, bool NotLastprivate = false) const;
/// Returns true if the directive at level \Level matches in the
/// specified \a DPred predicate.
@@ -908,6 +920,7 @@ public:
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->CurScope : nullptr;
}
+ void setContext(DeclContext *DC) { getTopOfStack().Context = DC; }
SourceLocation getConstructLoc() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->ConstructLoc : SourceLocation();
@@ -1072,6 +1085,15 @@ public:
return None;
return I->getSecond();
}
+
+ void addDeclareMapperVarRef(Expr *Ref) {
+ SharingMapTy &StackElem = getTopOfStack();
+ StackElem.DeclareMapperVar = Ref;
+ }
+ const Expr *getDeclareMapperVarRef() const {
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->DeclareMapperVar : nullptr;
+ }
};
bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
@@ -1175,6 +1197,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
DVar.Modifier = Data.Modifier;
+ DVar.AppliedToPointee = Data.AppliedToPointee;
return DVar;
}
@@ -1331,7 +1354,8 @@ const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
}
void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
- DeclRefExpr *PrivateCopy, unsigned Modifier) {
+ DeclRefExpr *PrivateCopy, unsigned Modifier,
+ bool AppliedToPointee) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
DSAInfo &Data = Threadprivates[D];
@@ -1355,12 +1379,14 @@ void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(E, IsLastprivate);
Data.PrivateCopy = PrivateCopy;
+ Data.AppliedToPointee = AppliedToPointee;
if (PrivateCopy) {
DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
Data.Modifier = Modifier;
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
+ Data.AppliedToPointee = AppliedToPointee;
}
}
}
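
A hedged OpenMP example of an attribute that is "applied to the pointee" rather than to the variable itself:

  // The reduction applies to the elements of the array section that 'p'
  // points to, not to the pointer 'p'; 'p' therefore must not be privatized
  // as if it were the reduction variable.
  void scale(int *p, int n) {
  #pragma omp parallel for reduction(+ : p[0:n])
    for (int i = 0; i < n; ++i)
      p[i] += i;
  }
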
@@ -1470,7 +1496,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
- Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task);
+ Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
+ /*AppliedToPointee=*/false);
}
return DSAVarData();
}
@@ -1496,7 +1523,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
- Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task);
+ Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task,
+ /*AppliedToPointee=*/false);
}
return DSAVarData();
}
@@ -1506,11 +1534,17 @@ bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
for (const_iterator E = end(); I != E; ++I) {
if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
isOpenMPTargetExecutionDirective(I->Directive)) {
- Scope *TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
- Scope *CurScope = getCurScope();
- while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
- CurScope = CurScope->getParent();
- return CurScope != TopScope;
+ if (I->CurScope) {
+ Scope *TopScope = I->CurScope->getParent();
+ Scope *CurScope = getCurScope();
+ while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
+ CurScope = CurScope->getParent();
+ return CurScope != TopScope;
+ }
+ for (DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent())
+ if (I->Context == DC)
+ return true;
+ return false;
}
}
return false;
@@ -1665,6 +1699,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
DVar.Modifier = Data.Modifier;
+ DVar.AppliedToPointee = Data.AppliedToPointee;
return DVar;
}
}
@@ -1686,7 +1721,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
// listed in a firstprivate clause, even if they are static data members.
DSAVarData DVarTemp = hasInnermostDSA(
D,
- [](OpenMPClauseKind C) {
+ [](OpenMPClauseKind C, bool) {
return C == OMPC_firstprivate || C == OMPC_shared;
},
MatchesAlways, FromParent);
@@ -1715,6 +1750,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
DVar.Modifier = Data.Modifier;
+ DVar.AppliedToPointee = Data.AppliedToPointee;
}
return DVar;
@@ -1745,7 +1781,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
@@ -1761,14 +1797,14 @@ DSAStackTy::hasDSA(ValueDecl *D,
continue;
const_iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
- if (I == NewI && CPred(DVar.CKind))
+ if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee))
return DVar;
}
return {};
}
const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
- ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
@@ -1782,26 +1818,28 @@ const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
return {};
const_iterator NewI = StartI;
DSAVarData DVar = getDSA(NewI, D);
- return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
+ return (NewI == StartI && CPred(DVar.CKind, DVar.AppliedToPointee))
+ ? DVar
+ : DSAVarData();
}
bool DSAStackTy::hasExplicitDSA(
- const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ const ValueDecl *D,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
unsigned Level, bool NotLastprivate) const {
if (getStackSize() <= Level)
return false;
D = getCanonicalDecl(D);
const SharingMapTy &StackElem = getStackElemAtLevel(Level);
auto I = StackElem.SharingMap.find(D);
- if (I != StackElem.SharingMap.end() &&
- I->getSecond().RefExpr.getPointer() &&
- CPred(I->getSecond().Attributes) &&
+ if (I != StackElem.SharingMap.end() && I->getSecond().RefExpr.getPointer() &&
+ CPred(I->getSecond().Attributes, I->getSecond().AppliedToPointee) &&
(!NotLastprivate || !I->getSecond().RefExpr.getInt()))
return true;
// Check predetermined rules for the loop control variables.
auto LI = StackElem.LCVMap.find(D);
if (LI != StackElem.LCVMap.end())
- return CPred(OMPC_private);
+ return CPred(OMPC_private, /*AppliedToPointee=*/false);
return false;
}
@@ -1859,27 +1897,27 @@ enum class FunctionEmissionStatus {
};
} // anonymous namespace
-Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
- unsigned DiagID) {
+Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
FunctionDecl *FD = getCurFunctionDecl();
- DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
+ SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
if (FD) {
FunctionEmissionStatus FES = getEmissionStatus(FD);
switch (FES) {
case FunctionEmissionStatus::Emitted:
- Kind = DeviceDiagBuilder::K_Immediate;
+ Kind = SemaDiagnosticBuilder::K_Immediate;
break;
case FunctionEmissionStatus::Unknown:
Kind = isOpenMPDeviceDelayedContext(*this)
- ? DeviceDiagBuilder::K_Deferred
- : DeviceDiagBuilder::K_Immediate;
+ ? SemaDiagnosticBuilder::K_Deferred
+ : SemaDiagnosticBuilder::K_Immediate;
break;
case FunctionEmissionStatus::TemplateDiscarded:
case FunctionEmissionStatus::OMPDiscarded:
- Kind = DeviceDiagBuilder::K_Nop;
+ Kind = SemaDiagnosticBuilder::K_Nop;
break;
case FunctionEmissionStatus::CUDADiscarded:
llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
@@ -1887,30 +1925,30 @@ Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
}
}
- return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
+ return SemaDiagnosticBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
}
-Sema::DeviceDiagBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
- unsigned DiagID) {
+Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
"Expected OpenMP host compilation.");
FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
- DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
+ SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
switch (FES) {
case FunctionEmissionStatus::Emitted:
- Kind = DeviceDiagBuilder::K_Immediate;
+ Kind = SemaDiagnosticBuilder::K_Immediate;
break;
case FunctionEmissionStatus::Unknown:
- Kind = DeviceDiagBuilder::K_Deferred;
+ Kind = SemaDiagnosticBuilder::K_Deferred;
break;
case FunctionEmissionStatus::TemplateDiscarded:
case FunctionEmissionStatus::OMPDiscarded:
case FunctionEmissionStatus::CUDADiscarded:
- Kind = DeviceDiagBuilder::K_Nop;
+ Kind = SemaDiagnosticBuilder::K_Nop;
break;
}
- return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
+ return SemaDiagnosticBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
}
static OpenMPDefaultmapClauseKind
@@ -2047,14 +2085,17 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
// By default, all the data that has a scalar type is mapped by copy
// (except for reduction variables).
// Defaultmap scalar is mutually exclusive with defaultmap pointer
- IsByRef =
- (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
- !Ty->isAnyPointerType()) ||
- !Ty->isScalarType() ||
- DSAStack->isDefaultmapCapturedByRef(
- Level, getVariableCategoryFromDecl(LangOpts, D)) ||
- DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
+ IsByRef = (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
+ !Ty->isAnyPointerType()) ||
+ !Ty->isScalarType() ||
+ DSAStack->isDefaultmapCapturedByRef(
+ Level, getVariableCategoryFromDecl(LangOpts, D)) ||
+ DSAStack->hasExplicitDSA(
+ D,
+ [](OpenMPClauseKind K, bool AppliedToPointee) {
+ return K == OMPC_reduction && !AppliedToPointee;
+ },
+ Level);
}
}
@@ -2065,8 +2106,9 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
OMPD_target) ||
!(DSAStack->hasExplicitDSA(
D,
- [](OpenMPClauseKind K) -> bool {
- return K == OMPC_firstprivate;
+ [](OpenMPClauseKind K, bool AppliedToPointee) -> bool {
+ return K == OMPC_firstprivate ||
+ (K == OMPC_reduction && AppliedToPointee);
},
Level, /*NotLastprivate=*/true) ||
DSAStack->isUsesAllocatorsDecl(Level, D))) &&
@@ -2078,7 +2120,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
// copy
!(DSAStack->getDefaultDSA() == DSA_firstprivate &&
!DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K != OMPC_unknown; }, Level) &&
+ D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
+ Level) &&
!DSAStack->isLoopControlVariable(D, Level).first);
}
@@ -2141,7 +2184,8 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
return nullptr;
- } else if (isInOpenMPTargetExecutionDirective()) {
+ }
+ if (isInOpenMPTargetExecutionDirective()) {
// If the declaration is enclosed in a 'declare target' directive,
// then it should not be captured.
//
@@ -2159,6 +2203,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
break;
}
}
+ assert(CSI && "Failed to find CapturedRegionScopeInfo");
SmallVector<OpenMPDirectiveKind, 4> Regions;
getOpenMPCaptureRegions(Regions,
DSAStack->getDirective(CSI->OpenMPLevel));
@@ -2194,7 +2239,8 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
return VD ? VD : Info.second;
DSAStackTy::DSAVarData DVarTop =
DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
- if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind))
+ if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) &&
+ (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee))
return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
// Threadprivate variables must not be captured.
if (isOpenMPThreadPrivate(DVarTop.CKind))
@@ -2202,7 +2248,11 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
// Either the variable is not private, or it is the variable in a directive
// with the default(none) clause that is not used in any clause.
DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
- D, isOpenMPPrivate, [](OpenMPDirectiveKind) { return true; },
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee) {
+ return isOpenMPPrivate(C) && !AppliedToPointee;
+ },
+ [](OpenMPDirectiveKind) { return true; },
DSAStack->isClauseParsingMode());
// Global shared must not be captured.
if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
@@ -2244,7 +2294,11 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
[](OpenMPDirectiveKind K) { return isOpenMPTaskingDirective(K); },
Level)) {
bool IsTriviallyCopyable =
- D->getType().getNonReferenceType().isTriviallyCopyableType(Context);
+ D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
+ !D->getType()
+ .getNonReferenceType()
+ .getCanonicalType()
+ ->getAsCXXRecordDecl();
OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, DKind);
@@ -2252,7 +2306,8 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
(IsTriviallyCopyable ||
!isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
if (DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K == OMPC_firstprivate; },
+ D,
+ [](OpenMPClauseKind K, bool) { return K == OMPC_firstprivate; },
Level, /*NotLastprivate=*/true))
return OMPC_firstprivate;
DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
@@ -2273,7 +2328,8 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
DSAStack->isLoopControlVariable(D).first) &&
!DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
+ D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
+ Level) &&
!isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
return OMPC_private;
}
@@ -2281,7 +2337,8 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
DSAStack->isForceVarCapturing() &&
!DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K == OMPC_copyin; }, Level))
+ D, [](OpenMPClauseKind K, bool) { return K == OMPC_copyin; },
+ Level))
return OMPC_private;
}
// User-defined allocators are private since they must be defined in the
@@ -2292,7 +2349,8 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
return OMPC_private;
return (DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
+ D, [](OpenMPClauseKind K, bool) { return K == OMPC_private; },
+ Level) ||
(DSAStack->isClauseParsingMode() &&
DSAStack->getClauseParsingMode() == OMPC_private) ||
// Consider taskgroup reduction descriptor variable a private
@@ -2317,15 +2375,16 @@ void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
OpenMPClauseKind OMPC = OMPC_unknown;
for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
const unsigned NewLevel = I - 1;
- if (DSAStack->hasExplicitDSA(D,
- [&OMPC](const OpenMPClauseKind K) {
- if (isOpenMPPrivate(K)) {
- OMPC = K;
- return true;
- }
- return false;
- },
- NewLevel))
+ if (DSAStack->hasExplicitDSA(
+ D,
+ [&OMPC](const OpenMPClauseKind K, bool AppliedToPointee) {
+ if (isOpenMPPrivate(K) && !AppliedToPointee) {
+ OMPC = K;
+ return true;
+ }
+ return false;
+ },
+ NewLevel))
break;
if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, NewLevel,
@@ -2368,17 +2427,20 @@ bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (!VD->hasLocalStorage()) {
+ if (isInOpenMPTargetExecutionDirective())
+ return true;
DSAStackTy::DSAVarData TopDVar =
DSAStack->getTopDSA(D, /*FromParent=*/false);
unsigned NumLevels =
getOpenMPCaptureLevels(DSAStack->getDirective(Level));
if (Level == 0)
return (NumLevels == CaptureLevel + 1) && TopDVar.CKind != OMPC_shared;
- DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level - 1);
- return DVar.CKind != OMPC_shared ||
- isOpenMPGlobalCapturedDecl(
- D, Level - 1,
- getOpenMPCaptureLevels(DSAStack->getDirective(Level - 1)) - 1);
+ do {
+ --Level;
+ DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
+ if (DVar.CKind != OMPC_shared)
+ return true;
+ } while (Level > 0);
}
}
return true;
@@ -2388,10 +2450,6 @@ void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
OMPTraitInfo &TI) {
- if (!OMPDeclareVariantScopes.empty()) {
- Diag(Loc, diag::warn_nested_declare_variant);
- return;
- }
OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
}
@@ -2424,7 +2482,7 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
StringRef HostDevTy =
getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
- Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
+ Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
diag::note_omp_marked_device_type_here)
<< HostDevTy;
return;
@@ -2435,7 +2493,7 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
- Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
+ Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
diag::note_omp_marked_device_type_here)
<< NoHostDevTy;
}
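
A short sketch of code that reaches these two diagnostics (assuming host compilation; names invented); the note now points at the attribute location returned by OMPDeclareTargetDeclAttr::getLocation, which is aware of inherited attributes:

    #pragma omp declare target device_type(nohost)
    int dev_fn();
    #pragma omp end declare target

    int host_use() { return dev_fn(); }  // error: device-only function called in host code
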
@@ -3138,6 +3196,64 @@ Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
return DeclGroupPtrTy::make(DeclGroupRef(D));
}
+void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
+ OpenMPDirectiveKind DKind,
+ ArrayRef<StringRef> Assumptions,
+ bool SkippedClauses) {
+ if (!SkippedClauses && Assumptions.empty())
+ Diag(Loc, diag::err_omp_no_clause_for_directive)
+ << llvm::omp::getAllAssumeClauseOptions()
+ << llvm::omp::getOpenMPDirectiveName(DKind);
+
+ auto *AA = AssumptionAttr::Create(Context, llvm::join(Assumptions, ","), Loc);
+ if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
+ OMPAssumeScoped.push_back(AA);
+ return;
+ }
+
+ // Global assumes without assumption clauses are ignored.
+ if (Assumptions.empty())
+ return;
+
+ assert(DKind == llvm::omp::Directive::OMPD_assumes &&
+ "Unexpected omp assumption directive!");
+ OMPAssumeGlobal.push_back(AA);
+
+ // The OMPAssumeGlobal scope above will take care of new declarations but
+ // we also want to apply the assumption to existing ones, e.g., to
+ // declarations in included headers. To this end, we traverse all existing
+ // declaration contexts and annotate function declarations here.
+ SmallVector<DeclContext *, 8> DeclContexts;
+ auto *Ctx = CurContext;
+ while (Ctx->getLexicalParent())
+ Ctx = Ctx->getLexicalParent();
+ DeclContexts.push_back(Ctx);
+ while (!DeclContexts.empty()) {
+ DeclContext *DC = DeclContexts.pop_back_val();
+ for (auto *SubDC : DC->decls()) {
+ if (SubDC->isInvalidDecl())
+ continue;
+ if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
+ DeclContexts.push_back(CTD->getTemplatedDecl());
+ for (auto *S : CTD->specializations())
+ DeclContexts.push_back(S);
+ continue;
+ }
+ if (auto *DC = dyn_cast<DeclContext>(SubDC))
+ DeclContexts.push_back(DC);
+ if (auto *F = dyn_cast<FunctionDecl>(SubDC)) {
+ F->addAttr(AA);
+ continue;
+ }
+ }
+ }
+}
+
+void Sema::ActOnOpenMPEndAssumesDirective() {
+ assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
+ OMPAssumeScoped.pop_back();
+}
+
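
Usage sketch (illustrative; the ext_ assumption name is invented) of the two directive forms handled above:

    #pragma omp begin assumes no_openmp_routines  // scoped: pushed on OMPAssumeScoped
    void compute(void);                           // definitions in the scope get the attribute
    #pragma omp end assumes                       // pops the scope

    #pragma omp assumes ext_some_assumption       // global: also annotates declarations
                                                  // already seen, e.g. from included headers
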
OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList) {
/// For target specific clauses, the requires directive cannot be
@@ -3249,6 +3365,14 @@ getMapClauseKindFromModifier(OpenMPDefaultmapClauseModifier M,
case OMPC_DEFAULTMAP_MODIFIER_tofrom:
Kind = OMPC_MAP_tofrom;
break;
+ case OMPC_DEFAULTMAP_MODIFIER_present:
+ // OpenMP 5.1 [2.21.7.3, defaultmap clause, Description]
+ // If implicit-behavior is present, each variable referenced in the
+ // construct in the category specified by variable-category is treated as if
+ // it had been listed in a map clause with the map-type of alloc and
+ // map-type-modifier of present.
+ Kind = OMPC_MAP_alloc;
+ break;
case OMPC_DEFAULTMAP_MODIFIER_firstprivate:
case OMPC_DEFAULTMAP_MODIFIER_last:
llvm_unreachable("Unexpected defaultmap implicit behavior");
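
Illustrative example of the new mapping rule (OpenMP 5.1):

    void set() {
      int x = 0;
    #pragma omp target defaultmap(present : scalar)
      { x = 1; }  // treated as map(present, alloc : x): 'x' must already be on the device
    }
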
@@ -3275,8 +3399,11 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
bool ErrorFound = false;
bool TryCaptureCXXThisMembers = false;
CapturedStmt *CS = nullptr;
+ const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
- llvm::SmallVector<Expr *, 4> ImplicitMap[OMPC_MAP_delete];
+ llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
+ llvm::SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
+ ImplicitMapModifier[DefaultmapKindNum];
Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
@@ -3284,6 +3411,13 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
// Check implicitly captured variables.
if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
return;
+ if (S->getDirectiveKind() == OMPD_atomic ||
+ S->getDirectiveKind() == OMPD_critical ||
+ S->getDirectiveKind() == OMPD_section ||
+ S->getDirectiveKind() == OMPD_master) {
+ Visit(S->getAssociatedStmt());
+ return;
+ }
visitSubCaptures(S->getInnermostCapturedStmt());
// Try to capture inner this->member references to generate correct mappings
// and diagnostics.
@@ -3405,6 +3539,18 @@ public:
}
}
}
+ if (SemaRef.getLangOpts().OpenMP > 50) {
+ bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
+ OMPC_DEFAULTMAP_MODIFIER_present;
+ if (IsModifierPresent) {
+ if (llvm::find(ImplicitMapModifier[ClauseKind],
+ OMPC_MAP_MODIFIER_present) ==
+ std::end(ImplicitMapModifier[ClauseKind])) {
+ ImplicitMapModifier[ClauseKind].push_back(
+ OMPC_MAP_MODIFIER_present);
+ }
+ }
+ }
if (isOpenMPTargetExecutionDirective(DKind) &&
!Stack->isLoopControlVariable(VD).first) {
@@ -3445,7 +3591,7 @@ public:
Stack->getDefaultmapModifier(ClauseKind);
OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
M, ClauseKind == OMPC_DEFAULTMAP_aggregate || Res);
- ImplicitMap[Kind].emplace_back(E);
+ ImplicitMap[ClauseKind][Kind].emplace_back(E);
}
return;
}
@@ -3456,7 +3602,10 @@ public:
// enclosing worksharing or parallel construct may not be accessed in an
// explicit task.
DVar = Stack->hasInnermostDSA(
- VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
+ VD,
+ [](OpenMPClauseKind C, bool AppliedToPointee) {
+ return C == OMPC_reduction && !AppliedToPointee;
+ },
[](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
@@ -3529,9 +3678,11 @@ public:
OpenMPDefaultmapClauseModifier Modifier =
Stack->getDefaultmapModifier(OMPC_DEFAULTMAP_aggregate);
+ OpenMPDefaultmapClauseKind ClauseKind =
+ getVariableCategoryFromDecl(SemaRef.getLangOpts(), FD);
OpenMPMapClauseKind Kind = getMapClauseKindFromModifier(
Modifier, /*IsAggregateOrDeclareTarget*/ true);
- ImplicitMap[Kind].emplace_back(E);
+ ImplicitMap[ClauseKind][Kind].emplace_back(E);
return;
}
@@ -3541,7 +3692,10 @@ public:
// enclosing worksharing or parallel construct may not be accessed in
// an explicit task.
DVar = Stack->hasInnermostDSA(
- FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
+ FD,
+ [](OpenMPClauseKind C, bool AppliedToPointee) {
+ return C == OMPC_reduction && !AppliedToPointee;
+ },
[](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
@@ -3570,6 +3724,7 @@ public:
if (isOpenMPTargetExecutionDirective(DKind)) {
OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
+ Stack->getCurrentDirective(),
/*NoDiagnose=*/true))
return;
const auto *VD = cast<ValueDecl>(
@@ -3619,7 +3774,8 @@ public:
// Skip analysis of arguments of implicitly defined map clause for target
// directives.
if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
- C->isImplicit())) {
+ C->isImplicit() &&
+ !isOpenMPTaskingDirective(Stack->getCurrentDirective()))) {
for (Stmt *CC : C->children()) {
if (CC)
Visit(CC);
@@ -3662,8 +3818,13 @@ public:
ArrayRef<Expr *> getImplicitFirstprivate() const {
return ImplicitFirstprivate;
}
- ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind Kind) const {
- return ImplicitMap[Kind];
+ ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind DK,
+ OpenMPMapClauseKind MK) const {
+ return ImplicitMap[DK][MK];
+ }
+ ArrayRef<OpenMPMapModifierKind>
+ getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const {
+ return ImplicitMapModifier[Kind];
}
const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
return VarsWithInheritedDSA;
@@ -3784,19 +3945,20 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
/*OpenMPCaptureLevel=*/1);
break;
}
+ case OMPD_atomic:
+ case OMPD_critical:
+ case OMPD_section:
+ case OMPD_master:
+ break;
case OMPD_simd:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
- case OMPD_section:
case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_ordered:
- case OMPD_atomic:
case OMPD_target_data: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
@@ -4088,6 +4250,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
default:
llvm_unreachable("Unknown OpenMP directive");
}
+ DSAStack->setContext(CurContext);
}
int Sema::getNumberOfConstructScopes(unsigned Level) const {
@@ -4125,6 +4288,7 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
S.CurContext->addHiddenDecl(CED);
+ Sema::TentativeAnalysisScope Trap(S);
S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
return CED;
}
@@ -4262,6 +4426,12 @@ static bool checkOrderedOrderSpecified(Sema &S,
StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ArrayRef<OMPClause *> Clauses) {
+ if (DSAStack->getCurrentDirective() == OMPD_atomic ||
+ DSAStack->getCurrentDirective() == OMPD_critical ||
+ DSAStack->getCurrentDirective() == OMPD_section ||
+ DSAStack->getCurrentDirective() == OMPD_master)
+ return S;
+
bool ErrorFound = false;
CaptureRegionUnwinderRAII CaptureRegionUnwinder(
*this, ErrorFound, DSAStack->getCurrentDirective());
@@ -4975,7 +5145,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
VarsWithInheritedDSAType VarsWithInheritedDSA;
bool ErrorFound = false;
ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
- if (AStmt && !CurContext->isDependentContext()) {
+ if (AStmt && !CurContext->isDependentContext() && Kind != OMPD_atomic &&
+ Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master) {
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
// Check default data sharing attributes for referenced variables.
@@ -5004,11 +5175,33 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
SmallVector<Expr *, 4> ImplicitFirstprivates(
DSAChecker.getImplicitFirstprivate().begin(),
DSAChecker.getImplicitFirstprivate().end());
- SmallVector<Expr *, 4> ImplicitMaps[OMPC_MAP_delete];
- for (unsigned I = 0; I < OMPC_MAP_delete; ++I) {
- ArrayRef<Expr *> ImplicitMap =
- DSAChecker.getImplicitMap(static_cast<OpenMPDefaultmapClauseKind>(I));
- ImplicitMaps[I].append(ImplicitMap.begin(), ImplicitMap.end());
+ const unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
+ SmallVector<Expr *, 4> ImplicitMaps[DefaultmapKindNum][OMPC_MAP_delete];
+ SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
+ ImplicitMapModifiers[DefaultmapKindNum];
+ SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
+ ImplicitMapModifiersLoc[DefaultmapKindNum];
+ // Get the original location of present modifier from Defaultmap clause.
+ SourceLocation PresentModifierLocs[DefaultmapKindNum];
+ for (OMPClause *C : Clauses) {
+ if (auto *DMC = dyn_cast<OMPDefaultmapClause>(C))
+ if (DMC->getDefaultmapModifier() == OMPC_DEFAULTMAP_MODIFIER_present)
+ PresentModifierLocs[DMC->getDefaultmapKind()] =
+ DMC->getDefaultmapModifierLoc();
+ }
+ for (unsigned VC = 0; VC < DefaultmapKindNum; ++VC) {
+ auto Kind = static_cast<OpenMPDefaultmapClauseKind>(VC);
+ for (unsigned I = 0; I < OMPC_MAP_delete; ++I) {
+ ArrayRef<Expr *> ImplicitMap = DSAChecker.getImplicitMap(
+ Kind, static_cast<OpenMPMapClauseKind>(I));
+ ImplicitMaps[VC][I].append(ImplicitMap.begin(), ImplicitMap.end());
+ }
+ ArrayRef<OpenMPMapModifierKind> ImplicitModifier =
+ DSAChecker.getImplicitMapModifier(Kind);
+ ImplicitMapModifiers[VC].append(ImplicitModifier.begin(),
+ ImplicitModifier.end());
+ std::fill_n(std::back_inserter(ImplicitMapModifiersLoc[VC]),
+ ImplicitModifier.size(), PresentModifierLocs[VC]);
}
// Mark taskgroup task_reduction descriptors as implicitly firstprivate.
for (OMPClause *C : Clauses) {
@@ -5034,23 +5227,26 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
ErrorFound = true;
}
}
- int ClauseKindCnt = -1;
- for (ArrayRef<Expr *> ImplicitMap : ImplicitMaps) {
- ++ClauseKindCnt;
- if (ImplicitMap.empty())
- continue;
- CXXScopeSpec MapperIdScopeSpec;
- DeclarationNameInfo MapperId;
- auto Kind = static_cast<OpenMPMapClauseKind>(ClauseKindCnt);
- if (OMPClause *Implicit = ActOnOpenMPMapClause(
- llvm::None, llvm::None, MapperIdScopeSpec, MapperId, Kind,
- /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
- ImplicitMap, OMPVarListLocTy())) {
- ClausesWithImplicit.emplace_back(Implicit);
- ErrorFound |=
- cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMap.size();
- } else {
- ErrorFound = true;
+ for (unsigned I = 0, E = DefaultmapKindNum; I < E; ++I) {
+ int ClauseKindCnt = -1;
+ for (ArrayRef<Expr *> ImplicitMap : ImplicitMaps[I]) {
+ ++ClauseKindCnt;
+ if (ImplicitMap.empty())
+ continue;
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo MapperId;
+ auto Kind = static_cast<OpenMPMapClauseKind>(ClauseKindCnt);
+ if (OMPClause *Implicit = ActOnOpenMPMapClause(
+ ImplicitMapModifiers[I], ImplicitMapModifiersLoc[I],
+ MapperIdScopeSpec, MapperId, Kind, /*IsMapTypeImplicit=*/true,
+ SourceLocation(), SourceLocation(), ImplicitMap,
+ OMPVarListLocTy())) {
+ ClausesWithImplicit.emplace_back(Implicit);
+ ErrorFound |= cast<OMPMapClause>(Implicit)->varlist_size() !=
+ ImplicitMap.size();
+ } else {
+ ErrorFound = true;
+ }
}
}
}
@@ -5760,7 +5956,8 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
.get();
if (NewStep)
- NewStep = VerifyIntegerConstantExpression(NewStep).get();
+ NewStep =
+ VerifyIntegerConstantExpression(NewStep, /*FIXME*/ AllowFold).get();
}
NewSteps.push_back(NewStep);
}
@@ -5798,12 +5995,45 @@ static void setPrototype(Sema &S, FunctionDecl *FD, FunctionDecl *FDWithProto,
FD->setParams(Params);
}
+void Sema::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) {
+ if (D->isInvalidDecl())
+ return;
+ FunctionDecl *FD = nullptr;
+ if (auto *UTemplDecl = dyn_cast<FunctionTemplateDecl>(D))
+ FD = UTemplDecl->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(D);
+ assert(FD && "Expected a function declaration!");
+
+ // If we are instantiating templates we do *not* apply scoped assumptions,
+ // only global ones. We do apply scoped assumptions to the template
+ // definition, though.
+ if (!inTemplateInstantiation()) {
+ for (AssumptionAttr *AA : OMPAssumeScoped)
+ FD->addAttr(AA);
+ }
+ for (AssumptionAttr *AA : OMPAssumeGlobal)
+ FD->addAttr(AA);
+}
+
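
A sketch of the template nuance described in the comment above (illustrative):

    #pragma omp begin assumes no_parallelism
    template <class T> T twice(T v) { return v + v; }  // definition gets the scoped assumption
    #pragma omp end assumes

    int use() { return twice(21); }  // the implicit instantiation happens outside the scope,
                                     // so it only receives global assumptions
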
Sema::OMPDeclareVariantScope::OMPDeclareVariantScope(OMPTraitInfo &TI)
: TI(&TI), NameSuffix(TI.getMangledName()) {}
-FunctionDecl *
-Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
- Declarator &D) {
+void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists,
+ SmallVectorImpl<FunctionDecl *> &Bases) {
+ if (!D.getIdentifier())
+ return;
+
+ OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
+
+ // Templated variant functions are an extension; check whether that extension
+ // is active for this declare variant scope.
+ bool IsTemplated = !TemplateParamLists.empty();
+ if (IsTemplated &&
+ !DVScope.TI->isExtensionActive(
+ llvm::omp::TraitProperty::implementation_extension_allow_templates))
+ return;
+
IdentifierInfo *BaseII = D.getIdentifier();
LookupResult Lookup(*this, DeclarationName(BaseII), D.getIdentifierLoc(),
LookupOrdinaryName);
@@ -5812,12 +6042,18 @@ Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType FType = TInfo->getType();
- bool IsConstexpr = D.getDeclSpec().getConstexprSpecifier() == CSK_constexpr;
- bool IsConsteval = D.getDeclSpec().getConstexprSpecifier() == CSK_consteval;
+ bool IsConstexpr =
+ D.getDeclSpec().getConstexprSpecifier() == ConstexprSpecKind::Constexpr;
+ bool IsConsteval =
+ D.getDeclSpec().getConstexprSpecifier() == ConstexprSpecKind::Consteval;
- FunctionDecl *BaseFD = nullptr;
for (auto *Candidate : Lookup) {
- auto *UDecl = dyn_cast<FunctionDecl>(Candidate->getUnderlyingDecl());
+ auto *CandidateDecl = Candidate->getUnderlyingDecl();
+ FunctionDecl *UDecl = nullptr;
+ if (IsTemplated && isa<FunctionTemplateDecl>(CandidateDecl))
+ UDecl = cast<FunctionTemplateDecl>(CandidateDecl)->getTemplatedDecl();
+ else if (!IsTemplated)
+ UDecl = dyn_cast<FunctionDecl>(CandidateDecl);
if (!UDecl)
continue;
@@ -5828,22 +6064,32 @@ Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
if (UDecl->isConsteval() && !IsConsteval)
continue;
- QualType NewType = Context.mergeFunctionTypes(
- FType, UDecl->getType(), /* OfBlockPointer */ false,
- /* Unqualified */ false, /* AllowCXX */ true);
- if (NewType.isNull())
- continue;
+ QualType UDeclTy = UDecl->getType();
+ if (!UDeclTy->isDependentType()) {
+ QualType NewType = Context.mergeFunctionTypes(
+ FType, UDeclTy, /* OfBlockPointer */ false,
+ /* Unqualified */ false, /* AllowCXX */ true);
+ if (NewType.isNull())
+ continue;
+ }
// Found a base!
- BaseFD = UDecl;
- break;
- }
- if (!BaseFD) {
- BaseFD = cast<FunctionDecl>(ActOnDeclarator(S, D));
- BaseFD->setImplicit(true);
+ Bases.push_back(UDecl);
+ }
+
+ bool UseImplicitBase = !DVScope.TI->isExtensionActive(
+ llvm::omp::TraitProperty::implementation_extension_disable_implicit_base);
+ // If no base was found we create a declaration that we use as base.
+ if (Bases.empty() && UseImplicitBase) {
+ D.setFunctionDefinitionKind(FunctionDefinitionKind::Declaration);
+ Decl *BaseD = HandleDeclarator(S, D, TemplateParamLists);
+ BaseD->setImplicit(true);
+ if (auto *BaseTemplD = dyn_cast<FunctionTemplateDecl>(BaseD))
+ Bases.push_back(BaseTemplD->getTemplatedDecl());
+ else
+ Bases.push_back(cast<FunctionDecl>(BaseD));
}
- OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
std::string MangledName;
MangledName += D.getIdentifier()->getName();
MangledName += getOpenMPVariantManglingSeparatorStr();
@@ -5852,17 +6098,21 @@ Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
VariantII.setMangledOpenMPVariantName(true);
D.SetIdentifier(&VariantII, D.getBeginLoc());
- return BaseFD;
}
void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
- FunctionDecl *FD, FunctionDecl *BaseFD) {
+ Decl *D, SmallVectorImpl<FunctionDecl *> &Bases) {
// Do not mark the function as used, to prevent its emission if this is the
// only place where it is used.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
- Expr *VariantFuncRef = DeclRefExpr::Create(
+ FunctionDecl *FD = nullptr;
+ if (auto *UTemplDecl = dyn_cast<FunctionTemplateDecl>(D))
+ FD = UTemplDecl->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(D);
+ auto *VariantFuncRef = DeclRefExpr::Create(
Context, NestedNameSpecifierLoc(), SourceLocation(), FD,
/* RefersToEnclosingVariableOrCapture */ false,
/* NameLoc */ FD->getLocation(), FD->getType(), ExprValueKind::VK_RValue);
@@ -5870,7 +6120,8 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
auto *OMPDeclareVariantA = OMPDeclareVariantAttr::CreateImplicit(
Context, VariantFuncRef, DVScope.TI);
- BaseFD->addAttr(OMPDeclareVariantA);
+ for (FunctionDecl *BaseFD : Bases)
+ BaseFD->addAttr(OMPDeclareVariantA);
}
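
Illustrative sketch of the begin/end declare variant flow these hooks implement (names invented); templated variants participate only when the implementation extension trait allow_templates is active in the scope's context selector:

    int foo() { return 0; }              // base candidate found by lookup

    #pragma omp begin declare variant match(device = {kind(gpu)})
    int foo() { return 1; }              // variant definition: its mangled name gets the
    #pragma omp end declare variant      //   variant suffix, and the implicit
                                         //   OMPDeclareVariantAttr is attached to every
                                         //   base collected in 'Bases'
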
ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
@@ -5891,8 +6142,17 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
return Call;
ASTContext &Context = getASTContext();
- OMPContext OMPCtx(getLangOpts().OpenMPIsDevice,
- Context.getTargetInfo().getTriple());
+ std::function<void(StringRef)> DiagUnknownTrait = [this,
+ CE](StringRef ISATrait) {
+ // TODO Track the selector locations in a way that is accessible here to
+ // improve the diagnostic location.
+ Diag(CE->getBeginLoc(), diag::warn_unknown_declare_variant_isa_trait)
+ << ISATrait;
+ };
+ TargetOMPContext OMPCtx(Context, std::move(DiagUnknownTrait),
+ getCurFunctionDecl());
+
+ QualType CalleeFnType = CalleeFnDecl->getType();
SmallVector<Expr *, 4> Exprs;
SmallVector<VariantMatchInfo, 4> VMIs;
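
The new DiagUnknownTrait callback surfaces misspelled ISA traits when a call is resolved against its variants; for example (names invented):

    void fast_fn();
    #pragma omp declare variant(fast_fn) match(device = {isa("no_such_feature")})
    void fn();

    void call() { fn(); }  // warning: unknown ISA trait in the declare variant selector
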
@@ -5904,7 +6164,8 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
VariantMatchInfo VMI;
OMPTraitInfo &TI = A->getTraitInfo();
TI.getAsVariantMatchInfo(Context, VMI);
- if (!isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ false))
+ if (!isVariantApplicableInContext(VMI, OMPCtx,
+ /* DeviceSetOnly */ false))
continue;
VMIs.push_back(VMI);
@@ -5945,8 +6206,19 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
}
NewCall = BuildCallExpr(Scope, BestExpr, LParenLoc, ArgExprs, RParenLoc,
ExecConfig);
- if (NewCall.isUsable())
- break;
+ if (NewCall.isUsable()) {
+ if (CallExpr *NCE = dyn_cast<CallExpr>(NewCall.get())) {
+ FunctionDecl *NewCalleeFnDecl = NCE->getDirectCallee();
+ QualType NewType = Context.mergeFunctionTypes(
+ CalleeFnType, NewCalleeFnDecl->getType(),
+ /* OfBlockPointer */ false,
+ /* Unqualified */ false, /* AllowCXX */ true);
+ if (!NewType.isNull())
+ break;
+ // Don't use the call if the function type was not compatible.
+ NewCall = nullptr;
+ }
+ }
}
VMIs.erase(VMIs.begin() + BestIdx);
@@ -6027,8 +6299,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
// Deal with non-constant score and user condition expressions.
auto HandleNonConstantScoresAndConditions = [this](Expr *&E,
bool IsScore) -> bool {
- llvm::APSInt Result;
- if (!E || E->isIntegerConstantExpr(Result, Context))
+ if (!E || E->isIntegerConstantExpr(Context))
return false;
if (IsScore) {
@@ -6051,7 +6322,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
// Convert VariantRef expression to the type of the original function to
// resolve possible conflicts.
- ExprResult VariantRefCast;
+ ExprResult VariantRefCast = VariantRef;
if (LangOpts.CPlusPlus) {
QualType FnPtrType;
auto *Method = dyn_cast<CXXMethodDecl>(FD);
@@ -6076,25 +6347,27 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
} else {
FnPtrType = Context.getPointerType(FD->getType());
}
- ImplicitConversionSequence ICS =
- TryImplicitConversion(VariantRef, FnPtrType.getUnqualifiedType(),
- /*SuppressUserConversions=*/false,
- AllowedExplicit::None,
- /*InOverloadResolution=*/false,
- /*CStyle=*/false,
- /*AllowObjCWritebackConversion=*/false);
- if (ICS.isFailure()) {
- Diag(VariantRef->getExprLoc(),
- diag::err_omp_declare_variant_incompat_types)
- << VariantRef->getType()
- << ((Method && !Method->isStatic()) ? FnPtrType : FD->getType())
- << VariantRef->getSourceRange();
- return None;
+ QualType VariantPtrType = Context.getPointerType(VariantRef->getType());
+ if (VariantPtrType.getUnqualifiedType() != FnPtrType.getUnqualifiedType()) {
+ ImplicitConversionSequence ICS = TryImplicitConversion(
+ VariantRef, FnPtrType.getUnqualifiedType(),
+ /*SuppressUserConversions=*/false, AllowedExplicit::None,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+ if (ICS.isFailure()) {
+ Diag(VariantRef->getExprLoc(),
+ diag::err_omp_declare_variant_incompat_types)
+ << VariantRef->getType()
+ << ((Method && !Method->isStatic()) ? FnPtrType : FD->getType())
+ << VariantRef->getSourceRange();
+ return None;
+ }
+ VariantRefCast = PerformImplicitConversion(
+ VariantRef, FnPtrType.getUnqualifiedType(), AA_Converting);
+ if (!VariantRefCast.isUsable())
+ return None;
}
- VariantRefCast = PerformImplicitConversion(
- VariantRef, FnPtrType.getUnqualifiedType(), AA_Converting);
- if (!VariantRefCast.isUsable())
- return None;
// Drop previously built artificial addr_of unary op for member functions.
if (Method && !Method->isStatic()) {
Expr *PossibleAddrOfVariantRef = VariantRefCast.get();
@@ -6102,8 +6375,6 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
PossibleAddrOfVariantRef->IgnoreImplicit()))
VariantRefCast = UO->getSubExpr();
}
- } else {
- VariantRefCast = VariantRef;
}
ExprResult ER = CheckPlaceholderExpr(VariantRefCast.get());
@@ -6514,14 +6785,14 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
// loop. If test-expr is of form b relational-op var and relational-op is
// > or >= then incr-expr must cause var to increase on each iteration of
// the loop.
- llvm::APSInt Result;
- bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
+ Optional<llvm::APSInt> Result =
+ NewStep->getIntegerConstantExpr(SemaRef.Context);
bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
bool IsConstNeg =
- IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
+ Result && Result->isSigned() && (Subtract != Result->isNegative());
bool IsConstPos =
- IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
- bool IsConstZero = IsConstant && !Result.getBoolValue();
+ Result && Result->isSigned() && (Subtract == Result->isNegative());
+ bool IsConstZero = Result && !Result->getBoolValue();
// != with increment is treated as <; != with decrement is treated as >
if (!TestIsLessOp.hasValue())
@@ -6969,9 +7240,16 @@ calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
if (!NewStep.isUsable())
return nullptr;
- llvm::APSInt LRes, URes, SRes;
- bool IsLowerConst = Lower->isIntegerConstantExpr(LRes, SemaRef.Context);
- bool IsStepConst = Step->isIntegerConstantExpr(SRes, SemaRef.Context);
+ llvm::APSInt LRes, SRes;
+ bool IsLowerConst = false, IsStepConst = false;
+ if (Optional<llvm::APSInt> Res = Lower->getIntegerConstantExpr(SemaRef.Context)) {
+ LRes = *Res;
+ IsLowerConst = true;
+ }
+ if (Optional<llvm::APSInt> Res = Step->getIntegerConstantExpr(SemaRef.Context)) {
+ SRes = *Res;
+ IsStepConst = true;
+ }
bool NoNeedToConvert = IsLowerConst && !RoundToStep &&
((!TestIsStrictOp && LRes.isNonNegative()) ||
(TestIsStrictOp && LRes.isStrictlyPositive()));
@@ -7004,7 +7282,12 @@ calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
}
NeedToReorganize = NoNeedToConvert;
}
- bool IsUpperConst = Upper->isIntegerConstantExpr(URes, SemaRef.Context);
+ llvm::APSInt URes;
+ bool IsUpperConst = false;
+ if (Optional<llvm::APSInt> Res = Upper->getIntegerConstantExpr(SemaRef.Context)) {
+ URes = *Res;
+ IsUpperConst = true;
+ }
if (NoNeedToConvert && IsLowerConst && IsUpperConst &&
(!RoundToStep || IsStepConst)) {
unsigned BW = LRes.getBitWidth() > URes.getBitWidth() ? LRes.getBitWidth()
@@ -7436,6 +7719,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
+ Sema::TentativeAnalysisScope Trap(SemaRef);
Diff = SemaRef.ActOnFinishFullExpr(Diff.get(), /*DiscardedValue=*/false);
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
@@ -7952,9 +8236,9 @@ static ExprResult widenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
static bool fitsInto(unsigned Bits, bool Signed, const Expr *E, Sema &SemaRef) {
if (E == nullptr)
return false;
- llvm::APSInt Result;
- if (E->isIntegerConstantExpr(Result, SemaRef.Context))
- return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits);
+ if (Optional<llvm::APSInt> Result =
+ E->getIntegerConstantExpr(SemaRef.Context))
+ return Signed ? Result->isSignedIntN(Bits) : Result->isIntN(Bits);
return false;
}
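
A large share of the hunks in this file are the same mechanical migration: the out-parameter form E->isIntegerConstantExpr(Result, Ctx) becomes the Optional-returning E->getIntegerConstantExpr(Ctx). A minimal sketch of the new pattern as used above (the helper 'fold' is invented):

    static bool fold(const Expr *E, const ASTContext &Ctx) {
      // Before: llvm::APSInt R; if (E->isIntegerConstantExpr(R, Ctx)) ...
      if (Optional<llvm::APSInt> R = E->getIntegerConstantExpr(Ctx))
        return R->isStrictlyPositive();  // the folded value travels in the Optional
      return false;                      // not an integer constant expression
    }
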
@@ -8227,9 +8511,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Calculate the last iteration number beforehand instead of doing this on
// each iteration. Do not do this if the number of iterations may be constant-folded.
- llvm::APSInt Result;
- bool IsConstant =
- LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context);
+ bool IsConstant = LastIteration.get()->isIntegerConstantExpr(SemaRef.Context);
ExprResult CalcLastIteration;
if (!IsConstant) {
ExprResult SaveRef =
@@ -8915,8 +9197,6 @@ StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
-
setFunctionHasBranchProtectedScope();
DSAStack->setParentCancelRegion(DSAStack->isCancelRegion());
@@ -8961,8 +9241,6 @@ StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt,
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
-
setFunctionHasBranchProtectedScope();
return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt);
@@ -8974,8 +9252,6 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
-
bool ErrorFound = false;
llvm::APSInt Hint;
SourceLocation HintLoc;
@@ -9693,7 +9969,6 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -9757,7 +10032,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
<< getOpenMPClauseName(MemOrderKind);
}
- Stmt *Body = CS->getCapturedStmt();
+ Stmt *Body = AStmt;
if (auto *EWC = dyn_cast<ExprWithCleanups>(Body))
Body = EWC->getSubExpr();
@@ -12620,15 +12895,16 @@ isNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef, OpenMPClauseKind CKind,
ValExpr = Value.get();
// The expression must evaluate to a non-negative integer value.
- llvm::APSInt Result;
- if (ValExpr->isIntegerConstantExpr(Result, SemaRef.Context) &&
- Result.isSigned() &&
- !((!StrictlyPositive && Result.isNonNegative()) ||
- (StrictlyPositive && Result.isStrictlyPositive()))) {
- SemaRef.Diag(Loc, diag::err_omp_negative_expression_in_clause)
- << getOpenMPClauseName(CKind) << (StrictlyPositive ? 1 : 0)
- << ValExpr->getSourceRange();
- return false;
+ if (Optional<llvm::APSInt> Result =
+ ValExpr->getIntegerConstantExpr(SemaRef.Context)) {
+ if (Result->isSigned() &&
+ !((!StrictlyPositive && Result->isNonNegative()) ||
+ (StrictlyPositive && Result->isStrictlyPositive()))) {
+ SemaRef.Diag(Loc, diag::err_omp_negative_expression_in_clause)
+ << getOpenMPClauseName(CKind) << (StrictlyPositive ? 1 : 0)
+ << ValExpr->getSourceRange();
+ return false;
+ }
}
if (!BuildCapture)
return true;
@@ -12681,7 +12957,8 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
return E;
llvm::APSInt Result;
- ExprResult ICE = VerifyIntegerConstantExpression(E, &Result);
+ ExprResult ICE =
+ VerifyIntegerConstantExpression(E, &Result, /*FIXME*/ AllowFold);
if (ICE.isInvalid())
return ExprError();
if ((StrictlyPositive && !Result.isStrictlyPositive()) ||
@@ -13263,9 +13540,9 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
// OpenMP [2.7.1, Restrictions]
// chunk_size must be a loop invariant integer expression with a positive
// value.
- llvm::APSInt Result;
- if (ValExpr->isIntegerConstantExpr(Result, Context)) {
- if (Result.isSigned() && !Result.isStrictlyPositive()) {
+ if (Optional<llvm::APSInt> Result =
+ ValExpr->getIntegerConstantExpr(Context)) {
+ if (Result->isSigned() && !Result->isStrictlyPositive()) {
Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
<< "schedule" << 1 << ChunkSize->getSourceRange();
return nullptr;
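
Example of code rejected by this restriction (illustrative):

    void work(int n) {
    #pragma omp for schedule(static, 0)  // error: chunk_size must be strictly positive
      for (int i = 0; i < n; ++i)
        ;
    }
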
@@ -13522,7 +13799,9 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
- SourceLocation ExtraModifierLoc) {
+ SourceLocation ExtraModifierLoc,
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ ArrayRef<SourceLocation> MotionModifiersLoc) {
SourceLocation StartLoc = Locs.StartLoc;
SourceLocation LParenLoc = Locs.LParenLoc;
SourceLocation EndLoc = Locs.EndLoc;
@@ -13599,12 +13878,14 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
IsMapTypeImplicit, ExtraModifierLoc, ColonLoc, VarList, Locs);
break;
case OMPC_to:
- Res = ActOnOpenMPToClause(VarList, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId, Locs);
+ Res = ActOnOpenMPToClause(MotionModifiers, MotionModifiersLoc,
+ ReductionOrMapperIdScopeSpec, ReductionOrMapperId,
+ ColonLoc, VarList, Locs);
break;
case OMPC_from:
- Res = ActOnOpenMPFromClause(VarList, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId, Locs);
+ Res = ActOnOpenMPFromClause(MotionModifiers, MotionModifiersLoc,
+ ReductionOrMapperIdScopeSpec,
+ ReductionOrMapperId, ColonLoc, VarList, Locs);
break;
case OMPC_use_device_ptr:
Res = ActOnOpenMPUseDevicePtrClause(VarList, Locs);
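
The added motion-modifier arguments correspond to OpenMP 5.1 syntax such as (illustrative):

    int a[100];
    void refresh() {
    #pragma omp target update to(present : a[0:100])
    #pragma omp target update from(present : a[0:100])
    }
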
@@ -14003,7 +14284,10 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// from the worksharing construct.
if (isOpenMPTaskingDirective(CurrDir)) {
DVar = DSAStack->hasInnermostDSA(
- D, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee) {
+ return C == OMPC_reduction && !AppliedToPointee;
+ },
[](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) ||
@@ -14296,7 +14580,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
if (!isOpenMPCapturedDecl(D))
ExprCaptures.push_back(Ref->getDecl());
}
- if (TopDVar.CKind == OMPC_firstprivate ||
+ if ((TopDVar.CKind == OMPC_firstprivate && !TopDVar.PrivateCopy) ||
(!isOpenMPCapturedDecl(D) &&
Ref->getDecl()->hasAttr<OMPCaptureNoInitAttr>())) {
ExprResult RefRes = DefaultLvalueConversion(Ref);
@@ -14394,7 +14678,11 @@ public:
if (DVar.CKind != OMPC_unknown)
return true;
DSAStackTy::DSAVarData DVarPrivate = Stack->hasDSA(
- VD, isOpenMPPrivate, [](OpenMPDirectiveKind) { return true; },
+ VD,
+ [](OpenMPClauseKind C, bool AppliedToPointee) {
+ return isOpenMPPrivate(C) && !AppliedToPointee;
+ },
+ [](OpenMPDirectiveKind) { return true; },
/*FromParent=*/true);
return DVarPrivate.CKind != OMPC_unknown;
}
@@ -15027,6 +15315,17 @@ static bool actOnOMPReductionKindClause(
continue;
}
}
+ } else {
+ // Threadprivate variables cannot be shared between threads, so diagnose if
+ // the base is a threadprivate variable.
+ DSAStackTy::DSAVarData DVar = Stack->getTopDSA(D, /*FromParent=*/false);
+ if (DVar.CKind == OMPC_threadprivate) {
+ S.Diag(ELoc, diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_reduction);
+ reportOriginalDsa(S, Stack, D, DVar);
+ continue;
+ }
}
// Try to find 'declare reduction' corresponding construct before using
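
Code diagnosed by the new branch above (illustrative):

    static int tp;
    #pragma omp threadprivate(tp)
    void f() {
    #pragma omp parallel reduction(+ : tp)  // error: wrong DSA, threadprivate vs reduction
      tp += 1;
    }
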
@@ -15153,6 +15452,7 @@ static bool actOnOMPReductionKindClause(
auto *DRDRef = DeclareReductionRef.getAs<DeclRefExpr>();
auto *DRD = cast<OMPDeclareReductionDecl>(DRDRef->getDecl());
if (DRD->getInitializer()) {
+ S.ActOnUninitializedDecl(PrivateVD);
Init = DRDRef;
RHSVD->setInit(DRDRef);
RHSVD->setInitStyle(VarDecl::CallInit);
@@ -15259,10 +15559,19 @@ static bool actOnOMPReductionKindClause(
llvm_unreachable("Unexpected reduction operation");
}
}
- if (Init && DeclareReductionRef.isUnset())
+ if (Init && DeclareReductionRef.isUnset()) {
S.AddInitializerToDecl(RHSVD, Init, /*DirectInit=*/false);
- else if (!Init)
+ // Store initializer for single element in private copy. Will be used
+ // during codegen.
+ PrivateVD->setInit(RHSVD->getInit());
+ PrivateVD->setInitStyle(RHSVD->getInitStyle());
+ } else if (!Init) {
S.ActOnUninitializedDecl(RHSVD);
+ // Store initializer for single element in private copy. Will be used
+ // during codegen.
+ PrivateVD->setInit(RHSVD->getInit());
+ PrivateVD->setInitStyle(RHSVD->getInitStyle());
+ }
if (RHSVD->isInvalidDecl())
continue;
if (!RHSVD->hasInit() &&
@@ -15276,10 +15585,6 @@ static bool actOnOMPReductionKindClause(
<< D;
continue;
}
- // Store initializer for single element in private copy. Will be used during
- // codegen.
- PrivateVD->setInit(RHSVD->getInit());
- PrivateVD->setInitStyle(RHSVD->getInitStyle());
DeclRefExpr *PrivateDRE = buildDeclRefExpr(S, PrivateVD, PrivateTy, ELoc);
ExprResult ReductionOp;
if (DeclareReductionRef.isUsable()) {
@@ -15290,12 +15595,12 @@ static bool actOnOMPReductionKindClause(
if (!BasePath.empty()) {
LHS = S.DefaultLvalueConversion(LHS.get());
RHS = S.DefaultLvalueConversion(RHS.get());
- LHS = ImplicitCastExpr::Create(Context, PtrRedTy,
- CK_UncheckedDerivedToBase, LHS.get(),
- &BasePath, LHS.get()->getValueKind());
- RHS = ImplicitCastExpr::Create(Context, PtrRedTy,
- CK_UncheckedDerivedToBase, RHS.get(),
- &BasePath, RHS.get()->getValueKind());
+ LHS = ImplicitCastExpr::Create(
+ Context, PtrRedTy, CK_UncheckedDerivedToBase, LHS.get(), &BasePath,
+ LHS.get()->getValueKind(), FPOptionsOverride());
+ RHS = ImplicitCastExpr::Create(
+ Context, PtrRedTy, CK_UncheckedDerivedToBase, RHS.get(), &BasePath,
+ RHS.get()->getValueKind(), FPOptionsOverride());
}
FunctionProtoType::ExtProtoInfo EPI;
QualType Params[] = {PtrRedTy, PtrRedTy};
@@ -15305,7 +15610,8 @@ static bool actOnOMPReductionKindClause(
S.DefaultLvalueConversion(DeclareReductionRef.get()).get());
Expr *Args[] = {LHS.get(), RHS.get()};
ReductionOp =
- CallExpr::Create(Context, OVE, Args, Context.VoidTy, VK_RValue, ELoc);
+ CallExpr::Create(Context, OVE, Args, Context.VoidTy, VK_RValue, ELoc,
+ S.CurFPFeatureOverrides());
} else {
ReductionOp = S.BuildBinOp(
Stack->getCurScope(), ReductionId.getBeginLoc(), BOK, LHSDRE, RHSDRE);
@@ -15465,7 +15771,8 @@ static bool actOnOMPReductionKindClause(
// correct analysis of in_reduction clauses.
if (CurrDir == OMPD_taskgroup && ClauseKind == OMPC_task_reduction)
Modifier = OMPC_REDUCTION_task;
- Stack->addDSA(D, RefExpr->IgnoreParens(), OMPC_reduction, Ref, Modifier);
+ Stack->addDSA(D, RefExpr->IgnoreParens(), OMPC_reduction, Ref, Modifier,
+ ASE || OASE);
if (Modifier == OMPC_REDUCTION_task &&
(CurrDir == OMPD_taskgroup ||
((isOpenMPParallelDirective(CurrDir) ||
@@ -15736,12 +16043,12 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
// Warn about zero linear step (it would be probably better specified as
// making corresponding variables 'const').
- llvm::APSInt Result;
- bool IsConstant = StepExpr->isIntegerConstantExpr(Result, Context);
- if (IsConstant && !Result.isNegative() && !Result.isStrictlyPositive())
- Diag(StepLoc, diag::warn_omp_linear_step_zero) << Vars[0]
- << (Vars.size() > 1);
- if (!IsConstant && CalcStep.isUsable()) {
+ if (Optional<llvm::APSInt> Result =
+ StepExpr->getIntegerConstantExpr(Context)) {
+ if (!Result->isNegative() && !Result->isStrictlyPositive())
+ Diag(StepLoc, diag::warn_omp_linear_step_zero)
+ << Vars[0] << (Vars.size() > 1);
+ } else if (CalcStep.isUsable()) {
// Calculate the step beforehand instead of doing this on each iteration.
// (This is not used if the number of iterations may be constant-folded).
CalcStepExpr = CalcStep.get();
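
Code that now reaches the warning via the Optional-based check (illustrative):

    void scale(float *a, int n) {
      int j = 0;
    #pragma omp simd linear(j : 0)  // warning: zero linear step; 'j' is effectively invariant
      for (int i = 0; i < n; ++i)
        a[j] += 1.0f;
    }
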
@@ -16645,11 +16952,14 @@ namespace {
class MapBaseChecker final : public StmtVisitor<MapBaseChecker, bool> {
Sema &SemaRef;
OpenMPClauseKind CKind = OMPC_unknown;
+ OpenMPDirectiveKind DKind = OMPD_unknown;
OMPClauseMappableExprCommon::MappableExprComponentList &Components;
+ bool IsNonContiguous = false;
bool NoDiagnose = false;
const Expr *RelevantExpr = nullptr;
bool AllowUnitySizeArraySection = true;
bool AllowWholeSizeArraySection = true;
+ bool AllowAnotherPtr = true;
SourceLocation ELoc;
SourceRange ERange;
@@ -16674,7 +16984,7 @@ public:
assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
RelevantExpr = DRE;
// Record the component.
- Components.emplace_back(DRE, DRE->getDecl());
+ Components.emplace_back(DRE, DRE->getDecl(), IsNonContiguous);
return true;
}
@@ -16746,7 +17056,7 @@ public:
AllowWholeSizeArraySection = false;
// Record the component.
- Components.emplace_back(ME, FD);
+ Components.emplace_back(ME, FD, IsNonContiguous);
return RelevantExpr || Visit(E);
}
@@ -16784,7 +17094,7 @@ public:
}
// Record the component - we don't have any declaration associated.
- Components.emplace_back(AE, nullptr);
+ Components.emplace_back(AE, nullptr, IsNonContiguous);
return RelevantExpr || Visit(E);
}
@@ -16823,6 +17133,13 @@ public:
// pointer. Otherwise, only unitary sections are accepted.
if (NotWhole || IsPointer)
AllowWholeSizeArraySection = false;
+ } else if (DKind == OMPD_target_update &&
+ SemaRef.getLangOpts().OpenMP >= 50) {
+ if (IsPointer && !AllowAnotherPtr)
+ SemaRef.Diag(ELoc, diag::err_omp_section_length_undefined)
+ << /*array of unknown bound */ 1;
+ else
+ IsNonContiguous = true;
} else if (AllowUnitySizeArraySection && NotUnity) {
// A unity or whole array section is not allowed and that is not
// compatible with the properties of the current array section.
@@ -16832,6 +17149,9 @@ public:
return false;
}
+ if (IsPointer)
+ AllowAnotherPtr = false;
+
if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
Expr::EvalResult ResultR;
Expr::EvalResult ResultL;
@@ -16857,14 +17177,14 @@ public:
}
// Record the component - we don't have any declaration associated.
- Components.emplace_back(OASE, nullptr);
+ Components.emplace_back(OASE, nullptr, /*IsNonContiguous=*/false);
return RelevantExpr || Visit(E);
}
bool VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
Expr *Base = E->getBase();
// Record the component - we don't have any declaration associated.
- Components.emplace_back(E, nullptr);
+ Components.emplace_back(E, nullptr, IsNonContiguous);
return Visit(Base->IgnoreParenImpCasts());
}
@@ -16877,7 +17197,7 @@ public:
}
if (!RelevantExpr) {
// Record the component if haven't found base decl.
- Components.emplace_back(UO, nullptr);
+ Components.emplace_back(UO, nullptr, /*IsNonContiguous=*/false);
}
return RelevantExpr || Visit(UO->getSubExpr()->IgnoreParenImpCasts());
}
@@ -16893,7 +17213,7 @@ public:
// know the other subtree is just an offset)
Expr *LE = BO->getLHS()->IgnoreParenImpCasts();
Expr *RE = BO->getRHS()->IgnoreParenImpCasts();
- Components.emplace_back(BO, nullptr);
+ Components.emplace_back(BO, nullptr, false);
assert((LE->getType().getTypePtr() == BO->getType().getTypePtr() ||
RE->getType().getTypePtr() == BO->getType().getTypePtr()) &&
"Either LHS or RHS have base decl inside");
@@ -16904,7 +17224,12 @@ public:
bool VisitCXXThisExpr(CXXThisExpr *CTE) {
assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
RelevantExpr = CTE;
- Components.emplace_back(CTE, nullptr);
+ Components.emplace_back(CTE, nullptr, IsNonContiguous);
+ return true;
+ }
+ bool VisitCXXOperatorCallExpr(CXXOperatorCallExpr *COCE) {
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ Components.emplace_back(COCE, nullptr, IsNonContiguous);
return true;
}
bool VisitStmt(Stmt *) {
@@ -16915,10 +17240,10 @@ public:
return RelevantExpr;
}
explicit MapBaseChecker(
- Sema &SemaRef, OpenMPClauseKind CKind,
+ Sema &SemaRef, OpenMPClauseKind CKind, OpenMPDirectiveKind DKind,
OMPClauseMappableExprCommon::MappableExprComponentList &Components,
bool NoDiagnose, SourceLocation &ELoc, SourceRange &ERange)
- : SemaRef(SemaRef), CKind(CKind), Components(Components),
+ : SemaRef(SemaRef), CKind(CKind), DKind(DKind), Components(Components),
NoDiagnose(NoDiagnose), ELoc(ELoc), ERange(ERange) {}
};
} // namespace
@@ -16930,13 +17255,30 @@ public:
static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
- OpenMPClauseKind CKind, bool NoDiagnose) {
+ OpenMPClauseKind CKind, OpenMPDirectiveKind DKind, bool NoDiagnose) {
SourceLocation ELoc = E->getExprLoc();
SourceRange ERange = E->getSourceRange();
- MapBaseChecker Checker(SemaRef, CKind, CurComponents, NoDiagnose, ELoc,
+ MapBaseChecker Checker(SemaRef, CKind, DKind, CurComponents, NoDiagnose, ELoc,
ERange);
- if (Checker.Visit(E->IgnoreParens()))
+ if (Checker.Visit(E->IgnoreParens())) {
+ // Check that the highest-dimension array section has a length specified.
+ if (SemaRef.getLangOpts().OpenMP >= 50 && !CurComponents.empty() &&
+ (CKind == OMPC_to || CKind == OMPC_from)) {
+ auto CI = CurComponents.rbegin();
+ auto CE = CurComponents.rend();
+ for (; CI != CE; ++CI) {
+ const auto *OASE =
+ dyn_cast<OMPArraySectionExpr>(CI->getAssociatedExpression());
+ if (!OASE)
+ continue;
+ if (OASE->getLength())
+ break;
+ SemaRef.Diag(ELoc, diag::err_array_section_does_not_specify_length)
+ << ERange;
+ }
+ }
return Checker.getFoundBase();
+ }
return nullptr;
}
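
Illustrative example of the OpenMP >= 5.0 behavior added above: a strided, noncontiguous slab becomes legal in target update, while a base-most array section without an explicit length is now diagnosed for to/from:

    double m[4][4];
    void push() {
    #pragma omp target update to(m[0:2][1:2])  // rows 0-1, columns 1-2: noncontiguous transfer
    }
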
@@ -17347,6 +17689,7 @@ static void checkMappableExpressionList(
auto &DeclNames = SemaRef.getASTContext().DeclarationNames;
MapperId.setName(DeclNames.getIdentifier(
&SemaRef.getASTContext().Idents.get("default")));
+ MapperId.setLoc(StartLoc);
}
// Iterators to find the current unresolved mapper expression.
@@ -17413,7 +17756,8 @@ static void checkMappableExpressionList(
// Obtain the array or member expression bases if required. Also, fill the
// components array with all the components identified in the process.
const Expr *BE = checkMapClauseExpressionBase(
- SemaRef, SimpleExpr, CurComponents, CKind, /*NoDiagnose=*/false);
+ SemaRef, SimpleExpr, CurComponents, CKind, DSAS->getCurrentDirective(),
+ /*NoDiagnose=*/false);
if (!BE)
continue;
@@ -17478,6 +17822,7 @@ static void checkMappableExpressionList(
/*CurrentRegionOnly=*/true, CurComponents, CKind))
break;
if (CKind == OMPC_map &&
+ (SemaRef.getLangOpts().OpenMP <= 45 || StartLoc.isValid()) &&
checkMapConflicts(SemaRef, DSAS, CurDeclaration, SimpleExpr,
/*CurrentRegionOnly=*/false, CurComponents, CKind))
break;
@@ -17623,9 +17968,9 @@ OMPClause *Sema::ActOnOpenMPMapClause(
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
- OpenMPMapModifierKind Modifiers[] = {OMPC_MAP_MODIFIER_unknown,
- OMPC_MAP_MODIFIER_unknown,
- OMPC_MAP_MODIFIER_unknown};
+ OpenMPMapModifierKind Modifiers[] = {
+ OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
+ OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};
SourceLocation ModifiersLoc[NumberOfOMPMapClauseModifiers];
// Process map-type-modifiers, flag errors for duplicate modifiers.
@@ -17944,10 +18289,10 @@ QualType Sema::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
return MapperType;
}
-OMPDeclareMapperDecl *Sema::ActOnOpenMPDeclareMapperDirectiveStart(
+Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
- Decl *PrevDeclInScope) {
+ Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope) {
LookupResult Lookup(*this, Name, SourceLocation(), LookupOMPMapperName,
forRedeclarationInCurContext());
// [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
@@ -18007,48 +18352,51 @@ OMPDeclareMapperDecl *Sema::ActOnOpenMPDeclareMapperDirectiveStart(
Invalid = true;
}
auto *DMD = OMPDeclareMapperDecl::Create(Context, DC, StartLoc, Name,
- MapperType, VN, PrevDMD);
- DC->addDecl(DMD);
+ MapperType, VN, Clauses, PrevDMD);
+ if (S)
+ PushOnScopeChains(DMD, S);
+ else
+ DC->addDecl(DMD);
DMD->setAccess(AS);
if (Invalid)
DMD->setInvalidDecl();
- // Enter new function scope.
- PushFunctionScope();
- setFunctionHasBranchProtectedScope();
-
- CurContext = DMD;
+ auto *VD = cast<DeclRefExpr>(MapperVarRef)->getDecl();
+ VD->setDeclContext(DMD);
+ VD->setLexicalDeclContext(DMD);
+ DMD->addDecl(VD);
+ DMD->setMapperVarRef(MapperVarRef);
- return DMD;
+ return DeclGroupPtrTy::make(DeclGroupRef(DMD));
}
-void Sema::ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
- Scope *S,
- QualType MapperType,
- SourceLocation StartLoc,
- DeclarationName VN) {
- VarDecl *VD = buildVarDecl(*this, StartLoc, MapperType, VN.getAsString());
+ExprResult
+Sema::ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType,
+ SourceLocation StartLoc,
+ DeclarationName VN) {
+ TypeSourceInfo *TInfo =
+ Context.getTrivialTypeSourceInfo(MapperType, StartLoc);
+ auto *VD = VarDecl::Create(Context, Context.getTranslationUnitDecl(),
+ StartLoc, StartLoc, VN.getAsIdentifierInfo(),
+ MapperType, TInfo, SC_None);
if (S)
- PushOnScopeChains(VD, S);
- else
- DMD->addDecl(VD);
- Expr *MapperVarRefExpr = buildDeclRefExpr(*this, VD, MapperType, StartLoc);
- DMD->setMapperVarRef(MapperVarRefExpr);
+ PushOnScopeChains(VD, S, /*AddToContext=*/false);
+ Expr *E = buildDeclRefExpr(*this, VD, MapperType, StartLoc);
+ DSAStack->addDeclareMapperVarRef(E);
+ return E;
}
-Sema::DeclGroupPtrTy
-Sema::ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
- ArrayRef<OMPClause *> ClauseList) {
- PopDeclContext();
- PopFunctionScopeInfo();
-
- if (D) {
- if (S)
- PushOnScopeChains(D, S, /*AddToContext=*/false);
- D->CreateClauses(Context, ClauseList);
- }
+bool Sema::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const {
+ assert(LangOpts.OpenMP && "Expected OpenMP mode.");
+ const Expr *Ref = DSAStack->getDeclareMapperVarRef();
+ if (const auto *DRE = cast_or_null<DeclRefExpr>(Ref))
+ return VD->getCanonicalDecl() == DRE->getDecl()->getCanonicalDecl();
+ return true;
+}
- return DeclGroupPtrTy::make(DeclGroupRef(D));
+const ValueDecl *Sema::getOpenMPDeclareMapperVarName() const {
+ assert(LangOpts.OpenMP && "Expected OpenMP mode.");
+ return cast<DeclRefExpr>(DSAStack->getDeclareMapperVarRef())->getDecl();
}
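
Sketch of the directive this rework parses in a single action (the mapper name is invented):

    struct Vec { int len; double *data; };
    #pragma omp declare mapper(myvec : Vec v) map(v, v.data[0 : v.len])

The mapper variable 'v' is built first (ActOnOpenMPDeclareMapperDirectiveVarDecl), referenced by the map clauses, and finally reparented into the OMPDeclareMapperDecl.
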
OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
@@ -18273,9 +18621,9 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause(
// OpenMP [2.7.1, Restrictions]
// chunk_size must be a loop invariant integer expression with a positive
// value.
- llvm::APSInt Result;
- if (ValExpr->isIntegerConstantExpr(Result, Context)) {
- if (Result.isSigned() && !Result.isStrictlyPositive()) {
+ if (Optional<llvm::APSInt> Result =
+ ValExpr->getIntegerConstantExpr(Context)) {
+ if (Result->isSigned() && !Result->isStrictlyPositive()) {
Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
<< "dist_schedule" << ChunkSize->getSourceRange();
return nullptr;
@@ -18326,20 +18674,38 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
bool isDefaultmapKind = (Kind != OMPC_DEFAULTMAP_unknown) ||
(LangOpts.OpenMP >= 50 && KindLoc.isInvalid());
if (!isDefaultmapKind || !isDefaultmapModifier) {
- std::string ModifierValue = "'alloc', 'from', 'to', 'tofrom', "
+ StringRef KindValue = "'scalar', 'aggregate', 'pointer'";
+ if (LangOpts.OpenMP == 50) {
+ StringRef ModifierValue = "'alloc', 'from', 'to', 'tofrom', "
"'firstprivate', 'none', 'default'";
- std::string KindValue = "'scalar', 'aggregate', 'pointer'";
- if (!isDefaultmapKind && isDefaultmapModifier) {
- Diag(KindLoc, diag::err_omp_unexpected_clause_value)
- << KindValue << getOpenMPClauseName(OMPC_defaultmap);
- } else if (isDefaultmapKind && !isDefaultmapModifier) {
- Diag(MLoc, diag::err_omp_unexpected_clause_value)
- << ModifierValue << getOpenMPClauseName(OMPC_defaultmap);
+ if (!isDefaultmapKind && isDefaultmapModifier) {
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << KindValue << getOpenMPClauseName(OMPC_defaultmap);
+ } else if (isDefaultmapKind && !isDefaultmapModifier) {
+ Diag(MLoc, diag::err_omp_unexpected_clause_value)
+ << ModifierValue << getOpenMPClauseName(OMPC_defaultmap);
+ } else {
+ Diag(MLoc, diag::err_omp_unexpected_clause_value)
+ << ModifierValue << getOpenMPClauseName(OMPC_defaultmap);
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << KindValue << getOpenMPClauseName(OMPC_defaultmap);
+ }
} else {
- Diag(MLoc, diag::err_omp_unexpected_clause_value)
- << ModifierValue << getOpenMPClauseName(OMPC_defaultmap);
- Diag(KindLoc, diag::err_omp_unexpected_clause_value)
- << KindValue << getOpenMPClauseName(OMPC_defaultmap);
+ StringRef ModifierValue =
+ "'alloc', 'from', 'to', 'tofrom', "
+ "'firstprivate', 'none', 'default', 'present'";
+ if (!isDefaultmapKind && isDefaultmapModifier) {
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << KindValue << getOpenMPClauseName(OMPC_defaultmap);
+ } else if (isDefaultmapKind && !isDefaultmapModifier) {
+ Diag(MLoc, diag::err_omp_unexpected_clause_value)
+ << ModifierValue << getOpenMPClauseName(OMPC_defaultmap);
+ } else {
+ Diag(MLoc, diag::err_omp_unexpected_clause_value)
+ << ModifierValue << getOpenMPClauseName(OMPC_defaultmap);
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << KindValue << getOpenMPClauseName(OMPC_defaultmap);
+ }
}
return nullptr;
}
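The duplicated branches above exist so the diagnostic lists exactly the modifiers valid for the active OpenMP version; 'present' is only offered for versions after 5.0. A small sketch, assuming -fopenmp-version selects the standard:

    int s;
    void f() {
      // Accepted with -fopenmp-version=51; under 5.0 the 'present' modifier
      // is rejected and the diagnostic lists only the 5.0 modifiers.
    #pragma omp target defaultmap(present : scalar)
      s = 1;
    }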
@@ -18377,14 +18743,14 @@ bool Sema::ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc) {
Diag(Loc, diag::err_omp_region_not_file_context);
return false;
}
- ++DeclareTargetNestingLevel;
+ DeclareTargetNesting.push_back(Loc);
return true;
}
void Sema::ActOnFinishOpenMPDeclareTargetDirective() {
- assert(DeclareTargetNestingLevel > 0 &&
+ assert(!DeclareTargetNesting.empty() &&
"Unexpected ActOnFinishOpenMPDeclareTargetDirective");
- --DeclareTargetNestingLevel;
+ DeclareTargetNesting.pop_back();
}
NamedDecl *
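Keeping a stack of directive locations (instead of a bare counter) lets the checks below tell whether an existing declare-target attribute came from the innermost enclosing region. A rough sketch of nested regions; the exact device_type interaction follows the OpenMP spec:

    #pragma omp declare target device_type(any)
    #pragma omp declare target device_type(nohost)
    int dev_fn();   // attributed by the innermost region
    #pragma omp end declare target
    int any_fn();
    #pragma omp end declare target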
@@ -18437,19 +18803,25 @@ void Sema::ActOnOpenMPDeclareTargetName(
(ND->isUsed(/*CheckUsedAttr=*/false) || ND->isReferenced()))
Diag(Loc, diag::warn_omp_declare_target_after_first_use);
+ auto *VD = cast<ValueDecl>(ND);
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(cast<ValueDecl>(ND));
- if (DevTy.hasValue() && *DevTy != DT) {
+ OMPDeclareTargetDeclAttr::getDeviceType(VD);
+ Optional<SourceLocation> AttrLoc = OMPDeclareTargetDeclAttr::getLocation(VD);
+ if (DevTy.hasValue() && *DevTy != DT &&
+ (DeclareTargetNesting.empty() ||
+ *AttrLoc != DeclareTargetNesting.back())) {
Diag(Loc, diag::err_omp_device_type_mismatch)
<< OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DT)
<< OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(*DevTy);
return;
}
Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(cast<ValueDecl>(ND));
- if (!Res) {
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(Context, MT, DT,
- SourceRange(Loc, Loc));
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+ if (!Res || (!DeclareTargetNesting.empty() &&
+ *AttrLoc == DeclareTargetNesting.back())) {
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ Context, MT, DT, DeclareTargetNesting.size() + 1,
+ SourceRange(Loc, Loc));
ND->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
@@ -18541,7 +18913,9 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
isa<FunctionTemplateDecl>(D)) {
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
Context, OMPDeclareTargetDeclAttr::MT_To,
- OMPDeclareTargetDeclAttr::DT_Any, SourceRange(IdLoc, IdLoc));
+ OMPDeclareTargetDeclAttr::DT_Any, DeclareTargetNesting.size(),
+ SourceRange(DeclareTargetNesting.back(),
+ DeclareTargetNesting.back()));
D->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
@@ -18554,11 +18928,31 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), *this, D);
}
-OMPClause *Sema::ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
- CXXScopeSpec &MapperIdScopeSpec,
- DeclarationNameInfo &MapperId,
- const OMPVarListLocTy &Locs,
- ArrayRef<Expr *> UnresolvedMappers) {
+OMPClause *Sema::ActOnOpenMPToClause(
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ ArrayRef<SourceLocation> MotionModifiersLoc,
+ CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
+ OpenMPMotionModifierKind Modifiers[] = {OMPC_MOTION_MODIFIER_unknown,
+ OMPC_MOTION_MODIFIER_unknown};
+ SourceLocation ModifiersLoc[NumberOfOMPMotionModifiers];
+
+ // Process motion-modifiers, flagging errors for duplicate modifiers.
+ unsigned Count = 0;
+ for (unsigned I = 0, E = MotionModifiers.size(); I < E; ++I) {
+ if (MotionModifiers[I] != OMPC_MOTION_MODIFIER_unknown &&
+ llvm::find(Modifiers, MotionModifiers[I]) != std::end(Modifiers)) {
+ Diag(MotionModifiersLoc[I], diag::err_omp_duplicate_motion_modifier);
+ continue;
+ }
+ assert(Count < NumberOfOMPMotionModifiers &&
+ "Modifiers exceed the allowed number of motion modifiers");
+ Modifiers[Count] = MotionModifiers[I];
+ ModifiersLoc[Count] = MotionModifiersLoc[I];
+ ++Count;
+ }
+
MappableVarListInfo MVLI(VarList);
checkMappableExpressionList(*this, DSAStack, OMPC_to, MVLI, Locs.StartLoc,
MapperIdScopeSpec, MapperId, UnresolvedMappers);
@@ -18567,15 +18961,35 @@ OMPClause *Sema::ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
return OMPToClause::Create(
Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
- MVLI.VarComponents, MVLI.UDMapperList,
+ MVLI.VarComponents, MVLI.UDMapperList, Modifiers, ModifiersLoc,
MapperIdScopeSpec.getWithLocInContext(Context), MapperId);
}
-OMPClause *Sema::ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
- CXXScopeSpec &MapperIdScopeSpec,
- DeclarationNameInfo &MapperId,
- const OMPVarListLocTy &Locs,
- ArrayRef<Expr *> UnresolvedMappers) {
+OMPClause *Sema::ActOnOpenMPFromClause(
+ ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ ArrayRef<SourceLocation> MotionModifiersLoc,
+ CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
+ OpenMPMotionModifierKind Modifiers[] = {OMPC_MOTION_MODIFIER_unknown,
+ OMPC_MOTION_MODIFIER_unknown};
+ SourceLocation ModifiersLoc[NumberOfOMPMotionModifiers];
+
+ // Process motion-modifiers, flagging errors for duplicate modifiers.
+ unsigned Count = 0;
+ for (unsigned I = 0, E = MotionModifiers.size(); I < E; ++I) {
+ if (MotionModifiers[I] != OMPC_MOTION_MODIFIER_unknown &&
+ llvm::find(Modifiers, MotionModifiers[I]) != std::end(Modifiers)) {
+ Diag(MotionModifiersLoc[I], diag::err_omp_duplicate_motion_modifier);
+ continue;
+ }
+ assert(Count < NumberOfOMPMotionModifiers &&
+ "Modifiers exceed the allowed number of motion modifiers");
+ Modifiers[Count] = MotionModifiers[I];
+ ModifiersLoc[Count] = MotionModifiersLoc[I];
+ ++Count;
+ }
+
MappableVarListInfo MVLI(VarList);
checkMappableExpressionList(*this, DSAStack, OMPC_from, MVLI, Locs.StartLoc,
MapperIdScopeSpec, MapperId, UnresolvedMappers);
@@ -18584,7 +18998,7 @@ OMPClause *Sema::ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
return OMPFromClause::Create(
Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
- MVLI.VarComponents, MVLI.UDMapperList,
+ MVLI.VarComponents, MVLI.UDMapperList, Modifiers, ModifiersLoc,
MapperIdScopeSpec.getWithLocInContext(Context), MapperId);
}
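Both to and from now accept motion-modifiers ahead of a colon, with duplicates diagnosed by the loops above. Sketch of the surface syntax; the 'present' modifier assumes OpenMP 5.1 mode:

    void f(int *a, int n) {
      // OK in OpenMP 5.1 mode:
    #pragma omp target update to(present : a[0:n])
      // error: duplicate motion modifier:
    #pragma omp target update from(present, present : a[0:n])
    }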
@@ -18661,8 +19075,8 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
// only need a component.
MVLI.VarBaseDeclarations.push_back(D);
MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
- MVLI.VarComponents.back().push_back(
- OMPClauseMappableExprCommon::MappableComponent(SimpleRefExpr, D));
+ MVLI.VarComponents.back().emplace_back(SimpleRefExpr, D,
+ /*IsNonContiguous=*/false);
}
if (MVLI.ProcessedVarList.empty())
@@ -18713,8 +19127,8 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
- MVLI.VarComponents.back().push_back(
- OMPClauseMappableExprCommon::MappableComponent(Component, D));
+ MVLI.VarComponents.back().emplace_back(Component, D,
+ /*IsNonContiguous=*/false);
}
if (MVLI.ProcessedVarList.empty())
@@ -18780,7 +19194,8 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
// Store the components in the stack so that they can be used to check
// against other clauses later on.
- OMPClauseMappableExprCommon::MappableComponent MC(SimpleRefExpr, D);
+ OMPClauseMappableExprCommon::MappableComponent MC(
+ SimpleRefExpr, D, /*IsNonContiguous=*/false);
DSAStack->addMappableExpressionComponents(
D, MC, /*WhereFoundClauseKind=*/OMPC_is_device_ptr);
@@ -19022,7 +19437,7 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
[](const UsesAllocatorsData &D) { return D.AllocatorTraits; }) &&
!findOMPAlloctraitT(*this, StartLoc, DSAStack))
return nullptr;
- llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> PredefinedAllocators;
+ llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> PredefinedAllocators;
for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
StringRef Allocator =
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 8635397f4806..7fe7466725fa 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -137,6 +137,7 @@ ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
+ ICR_Conversion,
ICR_OCL_Scalar_Widening,
ICR_Complex_Real_Conversion,
ICR_Conversion,
@@ -174,6 +175,7 @@ static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Compatible-types conversion",
"Derived-to-base conversion",
"Vector conversion",
+ "SVE Vector conversion",
"Vector splat",
"Complex-real conversion",
"Block Pointer conversion",
@@ -346,7 +348,6 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
ToType->isRealFloatingType()) {
if (IgnoreFloatToIntegralConversion)
return NK_Not_Narrowing;
- llvm::APSInt IntConstantValue;
const Expr *Initializer = IgnoreNarrowingConversion(Ctx, Converted);
assert(Initializer && "Unknown conversion expression");
@@ -354,19 +355,20 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
- if (Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
+ if (Optional<llvm::APSInt> IntConstantValue =
+ Initializer->getIntegerConstantExpr(Ctx)) {
// Convert the integer to the floating type.
llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
- Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
+ Result.convertFromAPInt(*IntConstantValue, IntConstantValue->isSigned(),
llvm::APFloat::rmNearestTiesToEven);
// And back.
- llvm::APSInt ConvertedValue = IntConstantValue;
+ llvm::APSInt ConvertedValue = *IntConstantValue;
bool ignored;
Result.convertToInteger(ConvertedValue,
llvm::APFloat::rmTowardZero, &ignored);
// If the resulting value is different, this was a narrowing conversion.
- if (IntConstantValue != ConvertedValue) {
- ConstantValue = APValue(IntConstantValue);
+ if (*IntConstantValue != ConvertedValue) {
+ ConstantValue = APValue(*IntConstantValue);
ConstantType = Initializer->getType();
return NK_Constant_Narrowing;
}
@@ -430,17 +432,18 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
(FromWidth == ToWidth && FromSigned != ToSigned) ||
(FromSigned && !ToSigned)) {
// Not all values of FromType can be represented in ToType.
- llvm::APSInt InitializerValue;
const Expr *Initializer = IgnoreNarrowingConversion(Ctx, Converted);
// If it's value-dependent, we can't tell whether it's narrowing.
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
- if (!Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
+ Optional<llvm::APSInt> OptInitializerValue;
+ if (!(OptInitializerValue = Initializer->getIntegerConstantExpr(Ctx))) {
// Such conversions on variables are always narrowing.
return NK_Variable_Narrowing;
}
+ llvm::APSInt &InitializerValue = *OptInitializerValue;
bool Narrowing = false;
if (FromWidth < ToWidth) {
// Negative -> unsigned is narrowing. Otherwise, more bits is never
@@ -1491,17 +1494,9 @@ Sema::TryImplicitConversion(Expr *From, QualType ToType,
/// converted expression. Flavor is the kind of conversion we're
/// performing, used in the error message. If @p AllowExplicit,
/// explicit user-defined conversions are permitted.
-ExprResult
-Sema::PerformImplicitConversion(Expr *From, QualType ToType,
- AssignmentAction Action, bool AllowExplicit) {
- ImplicitConversionSequence ICS;
- return PerformImplicitConversion(From, ToType, Action, AllowExplicit, ICS);
-}
-
-ExprResult
-Sema::PerformImplicitConversion(Expr *From, QualType ToType,
- AssignmentAction Action, bool AllowExplicit,
- ImplicitConversionSequence& ICS) {
+ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ AssignmentAction Action,
+ bool AllowExplicit) {
if (checkPlaceholderForOverload(*this, From))
return ExprError();
@@ -1512,13 +1507,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (getLangOpts().ObjC)
CheckObjCBridgeRelatedConversions(From->getBeginLoc(), ToType,
From->getType(), From);
- ICS = ::TryImplicitConversion(*this, From, ToType,
- /*SuppressUserConversions=*/false,
- AllowExplicit ? AllowedExplicit::All
- : AllowedExplicit::None,
- /*InOverloadResolution=*/false,
- /*CStyle=*/false, AllowObjCWritebackConversion,
- /*AllowObjCConversionOnExplicit=*/false);
+ ImplicitConversionSequence ICS = ::TryImplicitConversion(
+ *this, From, ToType,
+ /*SuppressUserConversions=*/false,
+ AllowExplicit ? AllowedExplicit::All : AllowedExplicit::None,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false, AllowObjCWritebackConversion,
+ /*AllowObjCConversionOnExplicit=*/false);
return PerformImplicitConversion(From, ToType, ICS, Action);
}
@@ -1649,6 +1644,13 @@ static bool IsVectorConversion(Sema &S, QualType FromType,
}
}
+ if (ToType->isSizelessBuiltinType() || FromType->isSizelessBuiltinType())
+ if (S.Context.areCompatibleSveTypes(FromType, ToType) ||
+ S.Context.areLaxCompatibleSveTypes(FromType, ToType)) {
+ ICK = ICK_SVE_Vector_Conversion;
+ return true;
+ }
+
// We can perform the conversion between vector types in the following cases:
// 1)vector types are equivalent AltiVec and GCC vector types
// 2)lax vector conversions are permitted and the vector types are of the
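The new ICK_SVE_Vector_Conversion classifies conversions between sizeless SVE builtin types and their fixed-length counterparts. A sketch, assuming an SVE-enabled target compiled with -msve-vector-bits=512:

    #include <arm_sve.h>
    typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));

    svint32_t to_sizeless(fixed_int32_t v) { return v; } // SVE vector conversion
    fixed_int32_t to_fixed(svint32_t v) { return v; }    // and back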
@@ -2183,21 +2185,22 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// compatibility.
if (From) {
if (FieldDecl *MemberDecl = From->getSourceBitField()) {
- llvm::APSInt BitWidth;
+ Optional<llvm::APSInt> BitWidth;
if (FromType->isIntegralType(Context) &&
- MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) {
- llvm::APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned());
+ (BitWidth =
+ MemberDecl->getBitWidth()->getIntegerConstantExpr(Context))) {
+ llvm::APSInt ToSize(BitWidth->getBitWidth(), BitWidth->isUnsigned());
ToSize = Context.getTypeSize(ToType);
// Are we promoting to an int from a bitfield that fits in an int?
- if (BitWidth < ToSize ||
- (FromType->isSignedIntegerType() && BitWidth <= ToSize)) {
+ if (*BitWidth < ToSize ||
+ (FromType->isSignedIntegerType() && *BitWidth <= ToSize)) {
return To->getKind() == BuiltinType::Int;
}
// Are we promoting to an unsigned int from an unsigned bitfield
// that fits into an unsigned int?
- if (FromType->isUnsignedIntegerType() && BitWidth <= ToSize) {
+ if (FromType->isUnsignedIntegerType() && *BitWidth <= ToSize) {
return To->getKind() == BuiltinType::UInt;
}
@@ -3643,13 +3646,32 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
return true;
}
+// Helper for compareConversionFunctions that gets the FunctionType that the
+// conversion-operator return value 'points' to, or nullptr.
+static const FunctionType *
+getConversionOpReturnTyAsFunction(CXXConversionDecl *Conv) {
+ const FunctionType *ConvFuncTy = Conv->getType()->castAs<FunctionType>();
+ const PointerType *RetPtrTy =
+ ConvFuncTy->getReturnType()->getAs<PointerType>();
+
+ if (!RetPtrTy)
+ return nullptr;
+
+ return RetPtrTy->getPointeeType()->getAs<FunctionType>();
+}
+
/// Compare the user-defined conversion functions or constructors
/// of two user-defined conversion sequences to determine whether any ordering
/// is possible.
static ImplicitConversionSequence::CompareKind
compareConversionFunctions(Sema &S, FunctionDecl *Function1,
FunctionDecl *Function2) {
- if (!S.getLangOpts().ObjC || !S.getLangOpts().CPlusPlus11)
+ CXXConversionDecl *Conv1 = dyn_cast_or_null<CXXConversionDecl>(Function1);
+ CXXConversionDecl *Conv2 = dyn_cast_or_null<CXXConversionDecl>(Function2);
+ if (!Conv1 || !Conv2)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ if (!Conv1->getParent()->isLambda() || !Conv2->getParent()->isLambda())
return ImplicitConversionSequence::Indistinguishable;
// Objective-C++:
@@ -3658,15 +3680,7 @@ compareConversionFunctions(Sema &S, FunctionDecl *Function1,
// respectively, always prefer the conversion to a function pointer,
// because the function pointer is more lightweight and is more likely
// to keep code working.
- CXXConversionDecl *Conv1 = dyn_cast_or_null<CXXConversionDecl>(Function1);
- if (!Conv1)
- return ImplicitConversionSequence::Indistinguishable;
-
- CXXConversionDecl *Conv2 = dyn_cast<CXXConversionDecl>(Function2);
- if (!Conv2)
- return ImplicitConversionSequence::Indistinguishable;
-
- if (Conv1->getParent()->isLambda() && Conv2->getParent()->isLambda()) {
+ if (S.getLangOpts().ObjC && S.getLangOpts().CPlusPlus11) {
bool Block1 = Conv1->getConversionType()->isBlockPointerType();
bool Block2 = Conv2->getConversionType()->isBlockPointerType();
if (Block1 != Block2)
@@ -3674,6 +3688,39 @@ compareConversionFunctions(Sema &S, FunctionDecl *Function1,
: ImplicitConversionSequence::Better;
}
+ // In order to support multiple calling conventions for the lambda conversion
+ // operator (such as when the free and member function calling conventions
+ // differ), prefer the 'free' convention first, followed by the calling
+ // convention of operator(). The latter supports the MSVC-like approach of
+ // defining ALL of the possible conversions with regard to calling convention.
+ const FunctionType *Conv1FuncRet = getConversionOpReturnTyAsFunction(Conv1);
+ const FunctionType *Conv2FuncRet = getConversionOpReturnTyAsFunction(Conv2);
+
+ if (Conv1FuncRet && Conv2FuncRet &&
+ Conv1FuncRet->getCallConv() != Conv2FuncRet->getCallConv()) {
+ CallingConv Conv1CC = Conv1FuncRet->getCallConv();
+ CallingConv Conv2CC = Conv2FuncRet->getCallConv();
+
+ CXXMethodDecl *CallOp = Conv2->getParent()->getLambdaCallOperator();
+ const FunctionProtoType *CallOpProto =
+ CallOp->getType()->getAs<FunctionProtoType>();
+
+ CallingConv CallOpCC =
+ CallOp->getType()->getAs<FunctionType>()->getCallConv();
+ CallingConv DefaultFree = S.Context.getDefaultCallingConvention(
+ CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
+ CallingConv DefaultMember = S.Context.getDefaultCallingConvention(
+ CallOpProto->isVariadic(), /*IsCXXMethod=*/true);
+
+ CallingConv PrefOrder[] = {DefaultFree, DefaultMember, CallOpCC};
+ for (CallingConv CC : PrefOrder) {
+ if (Conv1CC == CC)
+ return ImplicitConversionSequence::Better;
+ if (Conv2CC == CC)
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
return ImplicitConversionSequence::Indistinguishable;
}
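A sketch of the ambiguity this ordering resolves, assuming an MSVC-like 32-bit target where the lambda provides one conversion operator per supported calling convention:

    auto L = [](int i) { return i + 1; };
    int(__cdecl *p1)(int) = L;   // default free-function convention preferred
    int(__stdcall *p2)(int) = L; // still reachable via its own conversion op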
@@ -4102,6 +4149,20 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
: ImplicitConversionSequence::Worse;
}
+ if (SCS1.Second == ICK_SVE_Vector_Conversion &&
+ SCS2.Second == ICK_SVE_Vector_Conversion) {
+ bool SCS1IsCompatibleSVEVectorConversion =
+ S.Context.areCompatibleSveTypes(SCS1.getFromType(), SCS1.getToType(2));
+ bool SCS2IsCompatibleSVEVectorConversion =
+ S.Context.areCompatibleSveTypes(SCS2.getFromType(), SCS2.getToType(2));
+
+ if (SCS1IsCompatibleSVEVectorConversion !=
+ SCS2IsCompatibleSVEVectorConversion)
+ return SCS1IsCompatibleSVEVectorConversion
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+
return ImplicitConversionSequence::Indistinguishable;
}
@@ -4772,8 +4833,11 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// -- Otherwise, the reference shall be an lvalue reference to a
// non-volatile const type (i.e., cv1 shall be const), or the reference
// shall be an rvalue reference.
- if (!isRValRef && (!T1.isConstQualified() || T1.isVolatileQualified()))
+ if (!isRValRef && (!T1.isConstQualified() || T1.isVolatileQualified())) {
+ if (InitCategory.isRValue() && RefRelationship != Sema::Ref_Incompatible)
+ ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, Init, DeclType);
return ICS;
+ }
// -- If the initializer expression
//
@@ -4863,9 +4927,11 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// If T1 is reference-related to T2 and the reference is an rvalue
// reference, the initializer expression shall not be an lvalue.
- if (RefRelationship >= Sema::Ref_Related &&
- isRValRef && Init->Classify(S.Context).isLValue())
+ if (RefRelationship >= Sema::Ref_Related && isRValRef &&
+ Init->Classify(S.Context).isLValue()) {
+ ICS.setBad(BadConversionSequence::rvalue_ref_to_lvalue, Init, DeclType);
return ICS;
+ }
// C++ [over.ics.ref]p2:
// When a parameter of reference type is not bound directly to
@@ -4903,11 +4969,8 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// binding an rvalue reference to an lvalue other than a function
// lvalue.
// Note that the function case is not possible here.
- if (DeclType->isRValueReferenceType() && LValRefType) {
- // FIXME: This is the wrong BadConversionSequence. The problem is binding
- // an rvalue reference to a (non-function) lvalue, not binding an lvalue
- // reference to an rvalue!
- ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, Init, DeclType);
+ if (isRValRef && LValRefType) {
+ ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
return ICS;
}
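Recording lvalue_ref_to_rvalue and rvalue_ref_to_lvalue here, rather than returning a generic failure, feeds the more precise candidate notes added to DiagnoseBadConversion further down. Sketch of the two shapes (note wording approximate):

    void takes_lref(int &);
    void takes_rref(int &&);
    void g(int x) {
      takes_lref(42); // note: candidate expects an lvalue for 1st argument
      takes_rref(x);  // note: candidate expects an rvalue for 1st argument
    }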
@@ -4968,18 +5031,19 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
InOverloadResolution,
AllowObjCWritebackConversion);
}
- // FIXME: Check the other conditions here: array of character type,
- // initializer is a string literal.
- if (ToType->isArrayType()) {
- InitializedEntity Entity =
- InitializedEntity::InitializeParameter(S.Context, ToType,
- /*Consumed=*/false);
- if (S.CanPerformCopyInitialization(Entity, From)) {
- Result.setStandard();
- Result.Standard.setAsIdentityConversion();
- Result.Standard.setFromType(ToType);
- Result.Standard.setAllToTypes(ToType);
- return Result;
+
+ if (const auto *AT = S.Context.getAsArrayType(ToType)) {
+ if (S.IsStringInit(From->getInit(0), AT)) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, ToType,
+ /*Consumed=*/false);
+ if (S.CanPerformCopyInitialization(Entity, From)) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ return Result;
+ }
}
}
}
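The added IsStringInit check resolves the old FIXME: only an initializer list whose sole element is a string literal initializing a character array takes this identity-conversion path. Sketch:

    void f(const char (&a)[5]);
    void g() { f({"abcd"}); } // single string-literal element: OK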
@@ -5491,7 +5555,6 @@ static bool CheckConvertedConstantConversions(Sema &S,
// conversions are fine.
switch (SCS.Second) {
case ICK_Identity:
- case ICK_Function_Conversion:
case ICK_Integral_Promotion:
case ICK_Integral_Conversion: // Narrowing conversions are checked elsewhere.
case ICK_Zero_Queue_Conversion:
@@ -5522,6 +5585,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_Compatible_Conversion:
case ICK_Derived_To_Base:
case ICK_Vector_Conversion:
+ case ICK_SVE_Vector_Conversion:
case ICK_Vector_Splat:
case ICK_Complex_Real:
case ICK_Block_Pointer_Conversion:
@@ -5537,6 +5601,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_Function_To_Pointer:
llvm_unreachable("found a first conversion kind in Second");
+ case ICK_Function_Conversion:
case ICK_Qualification:
llvm_unreachable("found a third conversion kind in Second");
@@ -5553,7 +5618,8 @@ static bool CheckConvertedConstantConversions(Sema &S,
static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
QualType T, APValue &Value,
Sema::CCEKind CCE,
- bool RequireInt) {
+ bool RequireInt,
+ NamedDecl *Dest) {
assert(S.getLangOpts().CPlusPlus11 &&
"converted constant expression outside C++11");
@@ -5583,9 +5649,10 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
SCS = &ICS.Standard;
break;
case ImplicitConversionSequence::UserDefinedConversion:
- // We are converting to a non-class type, so the Before sequence
- // must be trivial.
- SCS = &ICS.UserDefined.After;
+ if (T->isRecordType())
+ SCS = &ICS.UserDefined.Before;
+ else
+ SCS = &ICS.UserDefined.After;
break;
case ImplicitConversionSequence::AmbiguousConversion:
case ImplicitConversionSequence::BadConversion:
@@ -5612,8 +5679,20 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
<< From->getType() << From->getSourceRange() << T;
}
- ExprResult Result =
- S.PerformImplicitConversion(From, T, ICS, Sema::AA_Converting);
+ // Usually we can simply apply the ImplicitConversionSequence we formed
+ // earlier, but that's not guaranteed to work when initializing an object of
+ // class type.
+ ExprResult Result;
+ if (T->isRecordType()) {
+ assert(CCE == Sema::CCEK_TemplateArg &&
+ "unexpected class type converted constant expr");
+ Result = S.PerformCopyInitialization(
+ InitializedEntity::InitializeTemplateParameter(
+ T, cast<NonTypeTemplateParmDecl>(Dest)),
+ SourceLocation(), From);
+ } else {
+ Result = S.PerformImplicitConversion(From, T, ICS, Sema::AA_Converting);
+ }
if (Result.isInvalid())
return Result;
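The new Dest parameter and the PerformCopyInitialization path serve C++20 class-type non-type template arguments, where replaying the previously formed conversion sequence is not guaranteed to work. A hedged sketch of such an argument (requires -std=c++20):

    struct Pt {
      int x, y;
      constexpr Pt(int x, int y) : x(x), y(y) {}
    };
    template <Pt P> constexpr int sum() { return P.x + P.y; }
    static_assert(sum<Pt(1, 2)>() == 3);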
@@ -5626,6 +5705,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
return Result;
// Check for a narrowing implicit conversion.
+ bool ReturnPreNarrowingValue = false;
APValue PreNarrowingValue;
QualType PreNarrowingType;
switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue,
@@ -5640,12 +5720,22 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
break;
case NK_Constant_Narrowing:
+ if (CCE == Sema::CCEK_ArrayBound &&
+ PreNarrowingType->isIntegralOrEnumerationType() &&
+ PreNarrowingValue.isInt()) {
+ // Don't diagnose array bound narrowing here; we produce more precise
+ // errors by allowing the un-narrowed value through.
+ ReturnPreNarrowingValue = true;
+ break;
+ }
S.Diag(From->getBeginLoc(), diag::ext_cce_narrowing)
<< CCE << /*Constant*/ 1
<< PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << T;
break;
case NK_Type_Narrowing:
+ // FIXME: It would be better to diagnose that the expression is not a
+ // constant expression.
S.Diag(From->getBeginLoc(), diag::ext_cce_narrowing)
<< CCE << /*Constant*/ 0 << From->getType() << T;
break;
@@ -5660,11 +5750,16 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
Eval.Diag = &Notes;
- Expr::ConstExprUsage Usage = CCE == Sema::CCEK_TemplateArg
- ? Expr::EvaluateForMangling
- : Expr::EvaluateForCodeGen;
- if (!Result.get()->EvaluateAsConstantExpr(Eval, Usage, S.Context) ||
+ ConstantExprKind Kind;
+ if (CCE == Sema::CCEK_TemplateArg && T->isRecordType())
+ Kind = ConstantExprKind::ClassTemplateArgument;
+ else if (CCE == Sema::CCEK_TemplateArg)
+ Kind = ConstantExprKind::NonClassTemplateArgument;
+ else
+ Kind = ConstantExprKind::Normal;
+
+ if (!Result.get()->EvaluateAsConstantExpr(Eval, S.Context, Kind) ||
(RequireInt && !Eval.Val.isInt())) {
// The expression can't be folded, so we can't keep it at this position in
// the AST.
@@ -5674,15 +5769,23 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
if (Notes.empty()) {
// It's a constant expression.
- return ConstantExpr::Create(S.Context, Result.get(), Value);
+ Expr *E = ConstantExpr::Create(S.Context, Result.get(), Value);
+ if (ReturnPreNarrowingValue)
+ Value = std::move(PreNarrowingValue);
+ return E;
}
}
// It's not a constant expression. Produce an appropriate diagnostic.
if (Notes.size() == 1 &&
- Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr)
+ Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr) {
S.Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
- else {
+ } else if (!Notes.empty() && Notes[0].second.getDiagID() ==
+ diag::note_constexpr_invalid_template_arg) {
+ Notes[0].second.setDiagID(diag::err_constexpr_invalid_template_arg);
+ for (unsigned I = 0; I < Notes.size(); ++I)
+ S.Diag(Notes[I].first, Notes[I].second);
+ } else {
S.Diag(From->getBeginLoc(), diag::err_expr_not_cce)
<< CCE << From->getSourceRange();
for (unsigned I = 0; I < Notes.size(); ++I)
@@ -5692,8 +5795,10 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
}
ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
- APValue &Value, CCEKind CCE) {
- return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false);
+ APValue &Value, CCEKind CCE,
+ NamedDecl *Dest) {
+ return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false,
+ Dest);
}
ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
@@ -5702,7 +5807,8 @@ ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
APValue V;
- auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true);
+ auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true,
+ /*Dest=*/nullptr);
if (!R.isInvalid() && !R.get()->isValueDependent())
Value = V.getInt();
return R;
@@ -5831,7 +5937,8 @@ diagnoseNoViableConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
// Record usage of conversion in an implicit cast.
From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(),
CK_UserDefinedConversion, Result.get(),
- nullptr, Result.get()->getValueKind());
+ nullptr, Result.get()->getValueKind(),
+ SemaRef.CurFPFeatureOverrides());
}
return false;
}
@@ -5860,7 +5967,8 @@ static bool recordConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
// Record usage of conversion in an implicit cast.
From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(),
CK_UserDefinedConversion, Result.get(),
- nullptr, Result.get()->getValueKind());
+ nullptr, Result.get()->getValueKind(),
+ SemaRef.CurFPFeatureOverrides());
return false;
}
@@ -7265,8 +7373,8 @@ void Sema::AddConversionCandidate(
VK_LValue, From->getBeginLoc());
ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
Context.getPointerType(Conversion->getType()),
- CK_FunctionToPointerDecay,
- &ConversionRef, VK_RValue);
+ CK_FunctionToPointerDecay, &ConversionRef,
+ VK_RValue, FPOptionsOverride());
QualType ConversionType = Conversion->getConversionType();
if (!isCompleteType(From->getBeginLoc(), ConversionType)) {
@@ -7735,26 +7843,14 @@ public:
bool AllowExplicitConversions,
const Qualifiers &VisibleTypeConversionsQuals);
- /// pointer_begin - First pointer type found;
- iterator pointer_begin() { return PointerTypes.begin(); }
-
- /// pointer_end - Past the last pointer type found;
- iterator pointer_end() { return PointerTypes.end(); }
-
- /// member_pointer_begin - First member pointer type found;
- iterator member_pointer_begin() { return MemberPointerTypes.begin(); }
-
- /// member_pointer_end - Past the last member pointer type found;
- iterator member_pointer_end() { return MemberPointerTypes.end(); }
-
- /// enumeration_begin - First enumeration type found;
- iterator enumeration_begin() { return EnumerationTypes.begin(); }
-
- /// enumeration_end - Past the last enumeration type found;
- iterator enumeration_end() { return EnumerationTypes.end(); }
-
+ llvm::iterator_range<iterator> pointer_types() { return PointerTypes; }
+ llvm::iterator_range<iterator> member_pointer_types() {
+ return MemberPointerTypes;
+ }
+ llvm::iterator_range<iterator> enumeration_types() {
+ return EnumerationTypes;
+ }
llvm::iterator_range<iterator> vector_types() { return VectorTypes; }
-
llvm::iterator_range<iterator> matrix_types() { return MatrixTypes; }
bool containsMatrixType(QualType Ty) const { return MatrixTypes.count(Ty); }
@@ -8091,12 +8187,16 @@ class BuiltinOperatorOverloadBuilder {
ArithmeticTypes.push_back(S.Context.IntTy);
ArithmeticTypes.push_back(S.Context.LongTy);
ArithmeticTypes.push_back(S.Context.LongLongTy);
- if (S.Context.getTargetInfo().hasInt128Type())
+ if (S.Context.getTargetInfo().hasInt128Type() ||
+ (S.Context.getAuxTargetInfo() &&
+ S.Context.getAuxTargetInfo()->hasInt128Type()))
ArithmeticTypes.push_back(S.Context.Int128Ty);
ArithmeticTypes.push_back(S.Context.UnsignedIntTy);
ArithmeticTypes.push_back(S.Context.UnsignedLongTy);
ArithmeticTypes.push_back(S.Context.UnsignedLongLongTy);
- if (S.Context.getTargetInfo().hasInt128Type())
+ if (S.Context.getTargetInfo().hasInt128Type() ||
+ (S.Context.getAuxTargetInfo() &&
+ S.Context.getAuxTargetInfo()->hasInt128Type()))
ArithmeticTypes.push_back(S.Context.UnsignedInt128Ty);
LastPromotedIntegralType = ArithmeticTypes.size();
LastPromotedArithmeticType = ArithmeticTypes.size();
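Consulting the aux target matters for offloading compilations where the host supports __int128 but the current (device) target query alone used to drop the 128-bit candidates. Sketch of code that needs them (hypothetical offload setup):

    struct Big { constexpr operator __int128() const { return 1; } };
    bool lt(Big b, long n) { return b < n; } // builtin operator< over __int128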
@@ -8238,19 +8338,17 @@ public:
// T* operator++(T*VQ&, int);
// T* operator--(T*VQ&, int);
void addPlusPlusMinusMinusPointerOverloads() {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[0].pointer_begin(),
- PtrEnd = CandidateTypes[0].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
+ for (QualType PtrTy : CandidateTypes[0].pointer_types()) {
// Skip pointer types that aren't pointers to object types.
- if (!(*Ptr)->getPointeeType()->isObjectType())
+ if (!PtrTy->getPointeeType()->isObjectType())
continue;
- addPlusPlusMinusMinusStyleOverloads(*Ptr,
- (!(*Ptr).isVolatileQualified() &&
- VisibleTypeConversionsQuals.hasVolatile()),
- (!(*Ptr).isRestrictQualified() &&
- VisibleTypeConversionsQuals.hasRestrict()));
+ addPlusPlusMinusMinusStyleOverloads(
+ PtrTy,
+ (!PtrTy.isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()),
+ (!PtrTy.isRestrictQualified() &&
+ VisibleTypeConversionsQuals.hasRestrict()));
}
}
@@ -8265,11 +8363,7 @@ public:
// ref-qualifier, there exist candidate operator functions of the form
// T& operator*(T*);
void addUnaryStarPointerOverloads() {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[0].pointer_begin(),
- PtrEnd = CandidateTypes[0].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
- QualType ParamTy = *Ptr;
+ for (QualType ParamTy : CandidateTypes[0].pointer_types()) {
QualType PointeeTy = ParamTy->getPointeeType();
if (!PointeeTy->isObjectType() && !PointeeTy->isFunctionType())
continue;
@@ -8309,13 +8403,8 @@ public:
//
// T* operator+(T*);
void addUnaryPlusPointerOverloads() {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[0].pointer_begin(),
- PtrEnd = CandidateTypes[0].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
- QualType ParamTy = *Ptr;
+ for (QualType ParamTy : CandidateTypes[0].pointer_types())
S.AddBuiltinCandidate(&ParamTy, Args, CandidateSet);
- }
}
// C++ [over.built]p10:
@@ -8349,16 +8438,12 @@ public:
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
- for (BuiltinCandidateTypeSet::iterator
- MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
- MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
- MemPtr != MemPtrEnd;
- ++MemPtr) {
+ for (QualType MemPtrTy : CandidateTypes[ArgIdx].member_pointer_types()) {
// Don't add the same builtin candidate twice.
- if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
+ if (!AddedTypes.insert(S.Context.getCanonicalType(MemPtrTy)).second)
continue;
- QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ QualType ParamTypes[2] = {MemPtrTy, MemPtrTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
@@ -8401,8 +8486,7 @@ public:
UserDefinedBinaryOperators;
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
- if (CandidateTypes[ArgIdx].enumeration_begin() !=
- CandidateTypes[ArgIdx].enumeration_end()) {
+ if (!CandidateTypes[ArgIdx].enumeration_types().empty()) {
for (OverloadCandidateSet::iterator C = CandidateSet.begin(),
CEnd = CandidateSet.end();
C != CEnd; ++C) {
@@ -8440,22 +8524,16 @@ public:
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[ArgIdx].pointer_begin(),
- PtrEnd = CandidateTypes[ArgIdx].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
+ for (QualType PtrTy : CandidateTypes[ArgIdx].pointer_types()) {
// Don't add the same builtin candidate twice.
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ if (!AddedTypes.insert(S.Context.getCanonicalType(PtrTy)).second)
continue;
- QualType ParamTypes[2] = { *Ptr, *Ptr };
+ QualType ParamTypes[2] = {PtrTy, PtrTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
- for (BuiltinCandidateTypeSet::iterator
- Enum = CandidateTypes[ArgIdx].enumeration_begin(),
- EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
- Enum != EnumEnd; ++Enum) {
- CanQualType CanonType = S.Context.getCanonicalType(*Enum);
+ for (QualType EnumTy : CandidateTypes[ArgIdx].enumeration_types()) {
+ CanQualType CanonType = S.Context.getCanonicalType(EnumTy);
// Don't add the same builtin candidate twice, or if a user defined
// candidate exists.
@@ -8463,7 +8541,7 @@ public:
UserDefinedBinaryOperators.count(std::make_pair(CanonType,
CanonType)))
continue;
- QualType ParamTypes[2] = { *Enum, *Enum };
+ QualType ParamTypes[2] = {EnumTy, EnumTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
@@ -8495,15 +8573,12 @@ public:
S.Context.getPointerDiffType(),
S.Context.getPointerDiffType(),
};
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[Arg].pointer_begin(),
- PtrEnd = CandidateTypes[Arg].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
- QualType PointeeTy = (*Ptr)->getPointeeType();
+ for (QualType PtrTy : CandidateTypes[Arg].pointer_types()) {
+ QualType PointeeTy = PtrTy->getPointeeType();
if (!PointeeTy->isObjectType())
continue;
- AsymmetricParamTypes[Arg] = *Ptr;
+ AsymmetricParamTypes[Arg] = PtrTy;
if (Arg == 0 || Op == OO_Plus) {
// operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
// T* operator+(ptrdiff_t, T*);
@@ -8511,10 +8586,10 @@ public:
}
if (Op == OO_Minus) {
// ptrdiff_t operator-(T, T);
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ if (!AddedTypes.insert(S.Context.getCanonicalType(PtrTy)).second)
continue;
- QualType ParamTypes[2] = { *Ptr, *Ptr };
+ QualType ParamTypes[2] = {PtrTy, PtrTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
@@ -8670,24 +8745,18 @@ public:
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
- for (BuiltinCandidateTypeSet::iterator
- Enum = CandidateTypes[ArgIdx].enumeration_begin(),
- EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
- Enum != EnumEnd; ++Enum) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
+ for (QualType EnumTy : CandidateTypes[ArgIdx].enumeration_types()) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(EnumTy)).second)
continue;
- AddBuiltinAssignmentOperatorCandidates(S, *Enum, Args, CandidateSet);
+ AddBuiltinAssignmentOperatorCandidates(S, EnumTy, Args, CandidateSet);
}
- for (BuiltinCandidateTypeSet::iterator
- MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
- MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
- MemPtr != MemPtrEnd; ++MemPtr) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
+ for (QualType MemPtrTy : CandidateTypes[ArgIdx].member_pointer_types()) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(MemPtrTy)).second)
continue;
- AddBuiltinAssignmentOperatorCandidates(S, *MemPtr, Args, CandidateSet);
+ AddBuiltinAssignmentOperatorCandidates(S, MemPtrTy, Args, CandidateSet);
}
}
}
@@ -8712,49 +8781,44 @@ public:
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[0].pointer_begin(),
- PtrEnd = CandidateTypes[0].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
+ for (QualType PtrTy : CandidateTypes[0].pointer_types()) {
// If this is operator=, keep track of the builtin candidates we added.
if (isEqualOp)
- AddedTypes.insert(S.Context.getCanonicalType(*Ptr));
- else if (!(*Ptr)->getPointeeType()->isObjectType())
+ AddedTypes.insert(S.Context.getCanonicalType(PtrTy));
+ else if (!PtrTy->getPointeeType()->isObjectType())
continue;
// non-volatile version
QualType ParamTypes[2] = {
- S.Context.getLValueReferenceType(*Ptr),
- isEqualOp ? *Ptr : S.Context.getPointerDiffType(),
+ S.Context.getLValueReferenceType(PtrTy),
+ isEqualOp ? PtrTy : S.Context.getPointerDiffType(),
};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/ isEqualOp);
- bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
+ bool NeedVolatile = !PtrTy.isVolatileQualified() &&
VisibleTypeConversionsQuals.hasVolatile();
if (NeedVolatile) {
// volatile version
ParamTypes[0] =
- S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+ S.Context.getLValueReferenceType(S.Context.getVolatileType(PtrTy));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
- if (!(*Ptr).isRestrictQualified() &&
+ if (!PtrTy.isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
- ParamTypes[0]
- = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getRestrictType(PtrTy));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
if (NeedVolatile) {
// volatile restrict version
- ParamTypes[0]
- = S.Context.getLValueReferenceType(
- S.Context.getCVRQualifiedType(*Ptr,
- (Qualifiers::Volatile |
- Qualifiers::Restrict)));
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getCVRQualifiedType(
+ PtrTy, (Qualifiers::Volatile | Qualifiers::Restrict)));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
@@ -8762,48 +8826,43 @@ public:
}
if (isEqualOp) {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[1].pointer_begin(),
- PtrEnd = CandidateTypes[1].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
+ for (QualType PtrTy : CandidateTypes[1].pointer_types()) {
// Make sure we don't add the same candidate twice.
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ if (!AddedTypes.insert(S.Context.getCanonicalType(PtrTy)).second)
continue;
QualType ParamTypes[2] = {
- S.Context.getLValueReferenceType(*Ptr),
- *Ptr,
+ S.Context.getLValueReferenceType(PtrTy),
+ PtrTy,
};
// non-volatile version
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
- bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
- VisibleTypeConversionsQuals.hasVolatile();
+ bool NeedVolatile = !PtrTy.isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile();
if (NeedVolatile) {
// volatile version
- ParamTypes[0] =
- S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+ ParamTypes[0] = S.Context.getLValueReferenceType(
+ S.Context.getVolatileType(PtrTy));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
}
- if (!(*Ptr).isRestrictQualified() &&
+ if (!PtrTy.isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
- ParamTypes[0]
- = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
+ ParamTypes[0] = S.Context.getLValueReferenceType(
+ S.Context.getRestrictType(PtrTy));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
if (NeedVolatile) {
// volatile restrict version
- ParamTypes[0]
- = S.Context.getLValueReferenceType(
- S.Context.getCVRQualifiedType(*Ptr,
- (Qualifiers::Volatile |
- Qualifiers::Restrict)));
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getCVRQualifiedType(
+ PtrTy, (Qualifiers::Volatile | Qualifiers::Restrict)));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
}
@@ -8938,12 +8997,9 @@ public:
// T* operator+(ptrdiff_t, T*); [ABOVE]
// T& operator[](ptrdiff_t, T*);
void addSubscriptOverloads() {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[0].pointer_begin(),
- PtrEnd = CandidateTypes[0].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
- QualType ParamTypes[2] = { *Ptr, S.Context.getPointerDiffType() };
- QualType PointeeType = (*Ptr)->getPointeeType();
+ for (QualType PtrTy : CandidateTypes[0].pointer_types()) {
+ QualType ParamTypes[2] = {PtrTy, S.Context.getPointerDiffType()};
+ QualType PointeeType = PtrTy->getPointeeType();
if (!PointeeType->isObjectType())
continue;
@@ -8951,12 +9007,9 @@ public:
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[1].pointer_begin(),
- PtrEnd = CandidateTypes[1].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
- QualType ParamTypes[2] = { S.Context.getPointerDiffType(), *Ptr };
- QualType PointeeType = (*Ptr)->getPointeeType();
+ for (QualType PtrTy : CandidateTypes[1].pointer_types()) {
+ QualType ParamTypes[2] = {S.Context.getPointerDiffType(), PtrTy};
+ QualType PointeeType = PtrTy->getPointeeType();
if (!PointeeType->isObjectType())
continue;
@@ -8975,11 +9028,8 @@ public:
//
// where CV12 is the union of CV1 and CV2.
void addArrowStarOverloads() {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[0].pointer_begin(),
- PtrEnd = CandidateTypes[0].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
- QualType C1Ty = (*Ptr);
+ for (QualType PtrTy : CandidateTypes[0].pointer_types()) {
+ QualType C1Ty = PtrTy;
QualType C1;
QualifierCollector Q1;
C1 = QualType(Q1.strip(C1Ty->getPointeeType()), 0);
@@ -8992,16 +9042,13 @@ public:
continue;
if (!VisibleTypeConversionsQuals.hasRestrict() && Q1.hasRestrict())
continue;
- for (BuiltinCandidateTypeSet::iterator
- MemPtr = CandidateTypes[1].member_pointer_begin(),
- MemPtrEnd = CandidateTypes[1].member_pointer_end();
- MemPtr != MemPtrEnd; ++MemPtr) {
- const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
+ for (QualType MemPtrTy : CandidateTypes[1].member_pointer_types()) {
+ const MemberPointerType *mptr = cast<MemberPointerType>(MemPtrTy);
QualType C2 = QualType(mptr->getClass(), 0);
C2 = C2.getUnqualifiedType();
if (C1 != C2 && !S.IsDerivedFrom(CandidateSet.getLocation(), C1, C2))
break;
- QualType ParamTypes[2] = { *Ptr, *MemPtr };
+ QualType ParamTypes[2] = {PtrTy, MemPtrTy};
// build CV12 T&
QualType T = mptr->getPointeeType();
if (!VisibleTypeConversionsQuals.hasVolatile() &&
@@ -9031,40 +9078,31 @@ public:
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
- for (BuiltinCandidateTypeSet::iterator
- Ptr = CandidateTypes[ArgIdx].pointer_begin(),
- PtrEnd = CandidateTypes[ArgIdx].pointer_end();
- Ptr != PtrEnd; ++Ptr) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ for (QualType PtrTy : CandidateTypes[ArgIdx].pointer_types()) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(PtrTy)).second)
continue;
- QualType ParamTypes[2] = { *Ptr, *Ptr };
+ QualType ParamTypes[2] = {PtrTy, PtrTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
- for (BuiltinCandidateTypeSet::iterator
- MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
- MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
- MemPtr != MemPtrEnd; ++MemPtr) {
- if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
+ for (QualType MemPtrTy : CandidateTypes[ArgIdx].member_pointer_types()) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(MemPtrTy)).second)
continue;
- QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ QualType ParamTypes[2] = {MemPtrTy, MemPtrTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
if (S.getLangOpts().CPlusPlus11) {
- for (BuiltinCandidateTypeSet::iterator
- Enum = CandidateTypes[ArgIdx].enumeration_begin(),
- EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
- Enum != EnumEnd; ++Enum) {
- if (!(*Enum)->castAs<EnumType>()->getDecl()->isScoped())
+ for (QualType EnumTy : CandidateTypes[ArgIdx].enumeration_types()) {
+ if (!EnumTy->castAs<EnumType>()->getDecl()->isScoped())
continue;
- if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
+ if (!AddedTypes.insert(S.Context.getCanonicalType(EnumTy)).second)
continue;
- QualType ParamTypes[2] = { *Enum, *Enum };
+ QualType ParamTypes[2] = {EnumTy, EnumTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
@@ -9508,6 +9546,75 @@ bool clang::isBetterOverloadCandidate(
else if (!Cand1.Viable)
return false;
+ // [CUDA] A function with a 'never' preference is marked not viable and
+ // therefore never shows up here. The worst preference that can show up
+ // here is 'wrong side', e.g. an H function called by an HD function in
+ // device compilation. This is valid AST as long as the HD function is not
+ // emitted, e.g. when it is an inline function called only by an H
+ // function; a deferred diagnostic is triggered if it is emitted. However,
+ // a wrong-sided function is still a viable candidate here.
+ //
+ // If Cand1 can be emitted and Cand2 cannot be emitted in the current
+ // context, Cand1 is better than Cand2. If Cand1 cannot be emitted and Cand2
+ // can be emitted, Cand1 is not better than Cand2. This rule should have
+ // precedence over other rules.
+ //
+ // If both Cand1 and Cand2 can be emitted, or neither can be emitted, then
+ // other rules should be used to determine which is better. This is because
+ // host/device-based overload resolution is mostly about determining the
+ // viability of a function. If two functions are both viable, other factors
+ // should take precedence, e.g. standard-defined preferences such as
+ // argument conversion ranks or enable_if partial ordering. The
+ // preference for pass-object-size parameters is probably most similar to a
+ // type-based-overloading decision and so should take priority.
+ //
+ // If other rules cannot determine which candidate is better, CUDA
+ // preference is used again as the final tie-breaker.
+ //
+ // TODO: Currently IdentifyCUDAPreference does not return correct values
+ // for functions called in global variable initializers because the
+ // correct host/device context is missing there. Therefore we can only
+ // enforce this rule when there is a caller. We should enforce this rule
+ // for functions in global variable initializers once proper context is
+ // added.
+ //
+ // TODO: We can only enable hostness-based overload resolution when
+ // -fgpu-exclude-wrong-side-overloads is on, since this requires deferring
+ // overload resolution diagnostics.
+ if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function &&
+ S.getLangOpts().GPUExcludeWrongSideOverloads) {
+ if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext)) {
+ bool IsCallerImplicitHD = Sema::isCUDAImplicitHostDeviceFunction(Caller);
+ bool IsCand1ImplicitHD =
+ Sema::isCUDAImplicitHostDeviceFunction(Cand1.Function);
+ bool IsCand2ImplicitHD =
+ Sema::isCUDAImplicitHostDeviceFunction(Cand2.Function);
+ auto P1 = S.IdentifyCUDAPreference(Caller, Cand1.Function);
+ auto P2 = S.IdentifyCUDAPreference(Caller, Cand2.Function);
+ assert(P1 != Sema::CFP_Never && P2 != Sema::CFP_Never);
+ // The implicit HD function may be a function in a system header that is
+ // forced to be HD by a pragma. In device compilation, if we prefer HD
+ // candidates over wrong-sided candidates, overload resolution may change,
+ // which may result in non-deferrable diagnostics. As a workaround, we give
+ // implicit HD candidates the same preference as wrong-sided candidates,
+ // which preserves the existing overload resolution.
+ // TODO: We still need special handling of implicit HD functions since
+ // they may cause other diagnostics that need to be deferred. We should
+ // make all host/device related diagnostics deferrable and remove the
+ // special handling of implicit HD functions.
+ auto EmitThreshold =
+ (S.getLangOpts().CUDAIsDevice && IsCallerImplicitHD &&
+ (IsCand1ImplicitHD || IsCand2ImplicitHD))
+ ? Sema::CFP_Never
+ : Sema::CFP_WrongSide;
+ auto Cand1Emittable = P1 > EmitThreshold;
+ auto Cand2Emittable = P2 > EmitThreshold;
+ if (Cand1Emittable && !Cand2Emittable)
+ return true;
+ if (!Cand1Emittable && Cand2Emittable)
+ return false;
+ }
+ }
+
// C++ [over.match.best]p1:
//
// -- if F is a static member function, ICS1(F) is defined such
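With -fgpu-exclude-wrong-side-overloads, emittability is compared before conversion ranks, so a wrong-sided exact match can lose to an emittable candidate. Hedged sketch (CUDA source, device compilation):

    __host__ int pick(float);    // exact match for the call, but wrong side
    __device__ int pick(double); // emittable: preferred despite the conversion
    __device__ int caller() { return pick(1.0f); }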
@@ -9742,12 +9849,6 @@ bool clang::isBetterOverloadCandidate(
return Cmp == Comparison::Better;
}
- if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function) {
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
- return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
- S.IdentifyCUDAPreference(Caller, Cand2.Function);
- }
-
bool HasPS1 = Cand1.Function != nullptr &&
functionHasPassObjectSizeParams(Cand1.Function);
bool HasPS2 = Cand2.Function != nullptr &&
@@ -9755,8 +9856,21 @@ bool clang::isBetterOverloadCandidate(
if (HasPS1 != HasPS2 && HasPS1)
return true;
- Comparison MV = isBetterMultiversionCandidate(Cand1, Cand2);
- return MV == Comparison::Better;
+ auto MV = isBetterMultiversionCandidate(Cand1, Cand2);
+ if (MV == Comparison::Better)
+ return true;
+ if (MV == Comparison::Worse)
+ return false;
+
+ // If no other rule can determine which candidate is better, fall back to
+ // CUDA preference as the final tie-breaker.
+ if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function) {
+ FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
+ return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
+ S.IdentifyCUDAPreference(Caller, Cand2.Function);
+ }
+
+ return false;
}
/// Determine whether two declarations are "equivalent" for the purposes of
@@ -9812,6 +9926,7 @@ bool Sema::isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
void Sema::diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv) {
+ assert(D && "Unknown declaration");
Diag(Loc, diag::ext_equivalent_internal_linkage_decl_in_modules) << D;
Module *M = getOwningModule(D);
@@ -9849,7 +9964,11 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
// only on their host/device attributes. Specifically, if one
// candidate call is WrongSide and the other is SameSide, we ignore
// the WrongSide candidate.
- if (S.getLangOpts().CUDA) {
+ // We only need to remove wrong-sided candidates here if
+ // -fgpu-exclude-wrong-side-overloads is off. When
+ // -fgpu-exclude-wrong-side-overloads is on, all candidates are compared
+ // uniformly in isBetterOverloadCandidate.
+ if (S.getLangOpts().CUDA && !S.getLangOpts().GPUExcludeWrongSideOverloads) {
const FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
bool ContainsSameSideCandidate =
llvm::any_of(Candidates, [&](OverloadCandidate *Cand) {
@@ -10110,6 +10229,27 @@ bool Sema::checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
Loc);
}
+// Don't print candidates other than the one that matches the calling
+// convention of the call operator, since that is guaranteed to exist.
+static bool shouldSkipNotingLambdaConversionDecl(FunctionDecl *Fn) {
+ const auto *ConvD = dyn_cast<CXXConversionDecl>(Fn);
+
+ if (!ConvD)
+ return false;
+ const auto *RD = cast<CXXRecordDecl>(Fn->getParent());
+ if (!RD->isLambda())
+ return false;
+
+ CXXMethodDecl *CallOp = RD->getLambdaCallOperator();
+ CallingConv CallOpCC =
+ CallOp->getType()->getAs<FunctionType>()->getCallConv();
+ QualType ConvRTy = ConvD->getType()->getAs<FunctionType>()->getReturnType();
+ CallingConv ConvToCC =
+ ConvRTy->getPointeeType()->getAs<FunctionType>()->getCallConv();
+
+ return ConvToCC != CallOpCC;
+}
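// Illustrative sketch (assumed target: 32-bit MSVC, where a captureless
// lambda grows one conversion function per supported calling convention):
//   auto L = [](int) {};
//   void (*p)(float) = L;  // error; the notes now mention only the
//                          // conversion whose calling convention matches
//                          // that of operator()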
+
// Notes the location of an overload candidate.
void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind,
@@ -10119,6 +10259,8 @@ void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
if (Fn->isMultiVersion() && Fn->hasAttr<TargetAttr>() &&
!Fn->getAttr<TargetAttr>()->isDefaultVersion())
return;
+ if (shouldSkipNotingLambdaConversionDecl(Fn))
+ return;
std::string FnDesc;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> KSPair =
@@ -10337,7 +10479,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
}
unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
- assert(CVR && "unexpected qualifiers mismatch");
+ assert(CVR && "expected qualifiers mismatch");
if (isObjectArgument) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr_this)
@@ -10354,6 +10496,17 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
return;
}
+ if (Conv.Bad.Kind == BadConversionSequence::lvalue_ref_to_rvalue ||
+ Conv.Bad.Kind == BadConversionSequence::rvalue_ref_to_lvalue) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_value_category)
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (unsigned)isObjectArgument << I + 1
+ << (Conv.Bad.Kind == BadConversionSequence::rvalue_ref_to_lvalue)
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange());
+ MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
+ return;
+ }
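// Illustrative sketch (assumed example) of the new value-category note:
//   void f(int &&);
//   void g(int &);
//   int n = 0;
//   f(n);   // note: candidate expects an rvalue for 1st argument
//   g(42);  // note: candidate expects an lvalue for 1st argument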
+
// Special diagnostic for failure to convert an initializer list, since
// telling the user that it has type void is not useful.
if (FromExpr && isa<InitListExpr>(FromExpr)) {
@@ -10411,15 +10564,6 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
!ToRefTy->getPointeeType()->isIncompleteType() &&
S.IsDerivedFrom(SourceLocation(), ToRefTy->getPointeeType(), FromTy)) {
BaseToDerivedConversion = 3;
- } else if (ToTy->isLValueReferenceType() && !FromExpr->isLValue() &&
- ToTy.getNonReferenceType().getCanonicalType() ==
- FromTy.getNonReferenceType().getCanonicalType()) {
- S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_lvalue)
- << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (unsigned)isObjectArgument << I + 1
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange());
- MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
- return;
}
}
@@ -10976,6 +11120,8 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
bool TakingCandidateAddress,
LangAS CtorDestAS = LangAS::Default) {
FunctionDecl *Fn = Cand->Function;
+ if (shouldSkipNotingLambdaConversionDecl(Fn))
+ return;
// Note deleted candidates, but only if they're viable.
if (Cand->Viable) {
@@ -11092,6 +11238,9 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
}
static void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
+ if (shouldSkipNotingLambdaConversionDecl(Cand->Surrogate))
+ return;
+
// Desugar the type of the surrogate down to a function type,
// retaining as many typedefs as possible while still showing
// the function type (and, therefore, its parameter types).
@@ -11482,16 +11631,34 @@ SmallVector<OverloadCandidate *, 32> OverloadCandidateSet::CompleteCandidates(
return Cands;
}
+bool OverloadCandidateSet::shouldDeferDiags(Sema &S, ArrayRef<Expr *> Args,
+ SourceLocation OpLoc) {
+ bool DeferHint = false;
+ if (S.getLangOpts().CUDA && S.getLangOpts().GPUDeferDiag) {
+ // Defer the diagnostic for CUDA/HIP if there are wrong-sided candidates
+ // or host-device candidates.
+ auto WrongSidedCands =
+ CompleteCandidates(S, OCD_AllCandidates, Args, OpLoc, [](auto &Cand) {
+ return (Cand.Viable == false &&
+ Cand.FailureKind == ovl_fail_bad_target) ||
+ (Cand.Function->template hasAttr<CUDAHostAttr>() &&
+ Cand.Function->template hasAttr<CUDADeviceAttr>());
+ });
+ DeferHint = WrongSidedCands.size();
+ }
+ return DeferHint;
+}
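// Illustrative sketch (assumed example): under -fgpu-defer-diag, an overload
// error whose candidates are all wrong-sided is emitted lazily, once the
// caller is known to be emitted:
//   __device__ int d(int);
//   __device__ int d(float);
//   void h() { d(0); }  // all candidates wrong-sided; diagnostic deferred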
+
/// When overload resolution fails, prints diagnostic messages containing the
/// candidates in the candidate set.
-void OverloadCandidateSet::NoteCandidates(PartialDiagnosticAt PD,
- Sema &S, OverloadCandidateDisplayKind OCD, ArrayRef<Expr *> Args,
- StringRef Opc, SourceLocation OpLoc,
+void OverloadCandidateSet::NoteCandidates(
+ PartialDiagnosticAt PD, Sema &S, OverloadCandidateDisplayKind OCD,
+ ArrayRef<Expr *> Args, StringRef Opc, SourceLocation OpLoc,
llvm::function_ref<bool(OverloadCandidate &)> Filter) {
auto Cands = CompleteCandidates(S, OCD, Args, OpLoc, Filter);
- S.Diag(PD.first, PD.second);
+ S.Diag(PD.first, PD.second, shouldDeferDiags(S, Args, OpLoc));
NoteCandidates(S, Args, Cands, Opc, OpLoc);
@@ -11543,7 +11710,9 @@ void OverloadCandidateSet::NoteCandidates(Sema &S, ArrayRef<Expr *> Args,
}
if (I != E)
- S.Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
+ S.Diag(OpLoc, diag::note_ovl_too_many_candidates,
+ shouldDeferDiags(S, Args, OpLoc))
+ << int(E - I);
}
static SourceLocation
@@ -12512,6 +12681,16 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
CandidateSet, PartialOverloading);
}
+/// Add the call candidates from the given set of lookup results to the given
+/// overload set. Non-function lookup results are ignored.
+void Sema::AddOverloadedCallCandidates(
+ LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
+ ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet) {
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+ AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs, Args,
+ CandidateSet, false, /*KnownValid*/ false);
+}
+
/// Determine whether a declaration with the specified name could be moved into
/// a different namespace.
static bool canBeDeclaredInNamespace(const DeclarationName &Name) {
@@ -12531,13 +12710,11 @@ static bool canBeDeclaredInNamespace(const DeclarationName &Name) {
/// correctly implement two-stage name lookup.
///
/// Returns true if a viable candidate was found and a diagnostic was issued.
-static bool
-DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
- const CXXScopeSpec &SS, LookupResult &R,
- OverloadCandidateSet::CandidateSetKind CSK,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- ArrayRef<Expr *> Args,
- bool *DoDiagnoseEmptyLookup = nullptr) {
+static bool DiagnoseTwoPhaseLookup(
+ Sema &SemaRef, SourceLocation FnLoc, const CXXScopeSpec &SS,
+ LookupResult &R, OverloadCandidateSet::CandidateSetKind CSK,
+ TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
+ CXXRecordDecl **FoundInClass = nullptr) {
if (!SemaRef.inTemplateInstantiation() || !SS.isEmpty())
return false;
@@ -12550,26 +12727,32 @@ DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
if (!R.empty()) {
R.suppressDiagnostics();
- if (isa<CXXRecordDecl>(DC)) {
- // Don't diagnose names we find in classes; we get much better
- // diagnostics for these from DiagnoseEmptyLookup.
- R.clear();
- if (DoDiagnoseEmptyLookup)
- *DoDiagnoseEmptyLookup = true;
- return false;
- }
-
OverloadCandidateSet Candidates(FnLoc, CSK);
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
- AddOverloadedCallCandidate(SemaRef, I.getPair(),
- ExplicitTemplateArgs, Args,
- Candidates, false, /*KnownValid*/ false);
+ SemaRef.AddOverloadedCallCandidates(R, ExplicitTemplateArgs, Args,
+ Candidates);
OverloadCandidateSet::iterator Best;
- if (Candidates.BestViableFunction(SemaRef, FnLoc, Best) != OR_Success) {
- // No viable functions. Don't bother the user with notes for functions
- // which don't work and shouldn't be found anyway.
- R.clear();
+ OverloadingResult OR =
+ Candidates.BestViableFunction(SemaRef, FnLoc, Best);
+
+ if (auto *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ // We either found non-function declarations or a best viable function
+ // at class scope. A class-scope lookup result disables ADL. Don't
+ // look past this, but let the caller know that we found something that
+ // either is, or might be, usable in this class.
+ if (FoundInClass) {
+ *FoundInClass = RD;
+ if (OR == OR_Success) {
+ R.clear();
+ R.addDecl(Best->FoundDecl.getDecl(), Best->FoundDecl.getAccess());
+ R.resolveKind();
+ }
+ }
+ return false;
+ }
+
+ if (OR != OR_Success) {
+ // There wasn't a unique best function or function template.
return false;
}
@@ -12665,7 +12848,11 @@ public:
/// Attempts to recover from a call where no functions were found.
///
-/// Returns true if new candidates were found.
+/// This function will do one of three things:
+/// * Diagnose, recover, and return a recovery expression.
+/// * Diagnose, fail to recover, and return ExprError().
+/// * Do not diagnose, do not recover, and return ExprResult(). The caller is
+/// expected to diagnose as appropriate.
static ExprResult
BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
@@ -12678,9 +12865,8 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
//
// template <typename T> auto foo(T t) -> decltype(foo(t)) {}
// template <typename T> auto foo(T t) -> decltype(foo(&t)) {}
- //
if (SemaRef.IsBuildingRecoveryCallExpr)
- return ExprError();
+ return ExprResult();
BuildRecoveryCallExprRAII RCE(SemaRef);
CXXScopeSpec SS;
@@ -12696,10 +12882,14 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
Sema::LookupOrdinaryName);
- bool DoDiagnoseEmptyLookup = EmptyLookup;
- if (!DiagnoseTwoPhaseLookup(
- SemaRef, Fn->getExprLoc(), SS, R, OverloadCandidateSet::CSK_Normal,
- ExplicitTemplateArgs, Args, &DoDiagnoseEmptyLookup)) {
+ CXXRecordDecl *FoundInClass = nullptr;
+ if (DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
+ OverloadCandidateSet::CSK_Normal,
+ ExplicitTemplateArgs, Args, &FoundInClass)) {
+ // OK, diagnosed a two-phase lookup issue.
+ } else if (EmptyLookup) {
+ // Try to recover from an empty lookup with typo correction.
+ R.clear();
NoTypoCorrectionCCC NoTypoValidator{};
FunctionCallFilterCCC FunctionCallValidator(SemaRef, Args.size(),
ExplicitTemplateArgs != nullptr,
@@ -12708,12 +12898,24 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
AllowTypoCorrection
? static_cast<CorrectionCandidateCallback &>(FunctionCallValidator)
: static_cast<CorrectionCandidateCallback &>(NoTypoValidator);
- if (!DoDiagnoseEmptyLookup ||
- SemaRef.DiagnoseEmptyLookup(S, SS, R, Validator, ExplicitTemplateArgs,
+ if (SemaRef.DiagnoseEmptyLookup(S, SS, R, Validator, ExplicitTemplateArgs,
Args))
return ExprError();
+ } else if (FoundInClass && SemaRef.getLangOpts().MSVCCompat) {
+ // We found a usable declaration of the name in a dependent base of some
+ // enclosing class.
+ // FIXME: We should also explain why the candidates found by name lookup
+ // were not viable.
+ if (SemaRef.DiagnoseDependentMemberLookup(R))
+ return ExprError();
+ } else {
+ // We had viable candidates and couldn't recover; let the caller diagnose
+ // this.
+ return ExprResult();
}
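// Illustrative sketch (assumed example) of the MSVC-compatibility recovery:
//   template <typename T> struct Base { void f(); };
//   template <typename T> struct Derived : Base<T> {
//     void g() { f(); }  // found only in the dependent base; recovered as
//                        // this->f() under -fms-compatibility, with a
//                        // diagnostic explaining the dependent lookup
//   };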
+ // If we get here, we should have issued a diagnostic and formed a recovery
+ // lookup result.
assert(!R.empty() && "lookup results empty despite recovery");
// If recovery created an ambiguity, just bail out.
@@ -12795,8 +12997,9 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
// then create a type dependent CallExpr. The goal is to postpone name
// lookup to instantiation time to be able to search into type dependent
// base classes.
- CallExpr *CE = CallExpr::Create(Context, Fn, Args, Context.DependentTy,
- VK_RValue, RParenLoc);
+ CallExpr *CE =
+ CallExpr::Create(Context, Fn, Args, Context.DependentTy, VK_RValue,
+ RParenLoc, CurFPFeatureOverrides());
CE->markDependentForPostponedNameLookup();
*Result = CE;
return true;
@@ -12818,6 +13021,8 @@ static QualType chooseRecoveryType(OverloadCandidateSet &CS,
auto ConsiderCandidate = [&](const OverloadCandidate &Candidate) {
if (!Candidate.Function)
return;
+ if (Candidate.Function->isInvalidDecl())
+ return;
QualType T = Candidate.Function->getReturnType();
if (T.isNull())
return;
@@ -12843,7 +13048,12 @@ static QualType chooseRecoveryType(OverloadCandidateSet &CS,
for (const auto &C : CS)
ConsiderCandidate(C);
- return Result.getValueOr(QualType());
+ if (!Result)
+ return QualType();
+ auto Value = Result.getValue();
+ if (Value.isNull() || Value->isUndeducedType())
+ return QualType();
+ return Value;
}
/// FinishOverloadedCallExpr - given an OverloadCandidateSet, builds and returns
@@ -12859,11 +13069,6 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
OverloadCandidateSet::iterator *Best,
OverloadingResult OverloadResult,
bool AllowTypoCorrection) {
- if (CandidateSet->empty())
- return BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc, Args,
- RParenLoc, /*EmptyLookup=*/true,
- AllowTypoCorrection);
-
switch (OverloadResult) {
case OR_Success: {
FunctionDecl *FDecl = (*Best)->Function;
@@ -12881,9 +13086,9 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// have meant to call.
ExprResult Recovery = BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc,
Args, RParenLoc,
- /*EmptyLookup=*/false,
+ CandidateSet->empty(),
AllowTypoCorrection);
- if (!Recovery.isInvalid())
+ if (Recovery.isInvalid() || Recovery.isUsable())
return Recovery;
// If the user passes in a function that we can't take the address of, we
@@ -12991,7 +13196,18 @@ ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
return Functions.size() > 1 ||
- (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
+ (Functions.size() == 1 &&
+ isa<FunctionTemplateDecl>((*Functions.begin())->getUnderlyingDecl()));
+}
+
+ExprResult Sema::CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc NNSLoc,
+ DeclarationNameInfo DNI,
+ const UnresolvedSetImpl &Fns,
+ bool PerformADL) {
+ return UnresolvedLookupExpr::Create(Context, NamingClass, NNSLoc, DNI,
+ PerformADL, IsOverloaded(Fns),
+ Fns.begin(), Fns.end());
}
/// Create a unary operation that may resolve to an overloaded
@@ -13044,10 +13260,11 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
CurFPFeatureOverrides());
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
- UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(
- Context, NamingClass, NestedNameSpecifierLoc(), OpNameInfo,
- /*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end());
- return CXXOperatorCallExpr::Create(Context, Op, Fn, ArgsArray,
+ ExprResult Fn = CreateUnresolvedLookupExpr(
+ NamingClass, NestedNameSpecifierLoc(), OpNameInfo, Fns);
+ if (Fn.isInvalid())
+ return ExprError();
+ return CXXOperatorCallExpr::Create(Context, Op, Fn.get(), ArgsArray,
Context.DependentTy, VK_RValue, OpLoc,
CurFPFeatureOverrides());
}
@@ -13291,14 +13508,14 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Fns.empty()) {
// If there are no functions to store, just build a dependent
// BinaryOperator or CompoundAssignment.
- if (Opc <= BO_Assign || Opc > BO_OrAssign)
- return BinaryOperator::Create(
- Context, Args[0], Args[1], Opc, Context.DependentTy, VK_RValue,
- OK_Ordinary, OpLoc, CurFPFeatureOverrides());
- return CompoundAssignOperator::Create(
- Context, Args[0], Args[1], Opc, Context.DependentTy, VK_LValue,
- OK_Ordinary, OpLoc, CurFPFeatureOverrides(), Context.DependentTy,
- Context.DependentTy);
+ if (BinaryOperator::isCompoundAssignmentOp(Opc))
+ return CompoundAssignOperator::Create(
+ Context, Args[0], Args[1], Opc, Context.DependentTy, VK_LValue,
+ OK_Ordinary, OpLoc, CurFPFeatureOverrides(), Context.DependentTy,
+ Context.DependentTy);
+ return BinaryOperator::Create(Context, Args[0], Args[1], Opc,
+ Context.DependentTy, VK_RValue, OK_Ordinary,
+ OpLoc, CurFPFeatureOverrides());
}
// FIXME: save results of ADL from here?
@@ -13306,10 +13523,11 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// TODO: provide better source location info in DNLoc component.
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
- UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(
- Context, NamingClass, NestedNameSpecifierLoc(), OpNameInfo,
- /*ADL*/ PerformADL, IsOverloaded(Fns), Fns.begin(), Fns.end());
- return CXXOperatorCallExpr::Create(Context, Op, Fn, Args,
+ ExprResult Fn = CreateUnresolvedLookupExpr(
+ NamingClass, NestedNameSpecifierLoc(), OpNameInfo, Fns, PerformADL);
+ if (Fn.isInvalid())
+ return ExprError();
+ return CXXOperatorCallExpr::Create(Context, Op, Fn.get(), Args,
Context.DependentTy, VK_RValue, OpLoc,
CurFPFeatureOverrides());
}
@@ -13773,15 +13991,13 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// CHECKME: no 'operator' keyword?
DeclarationNameInfo OpNameInfo(OpName, LLoc);
OpNameInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
- UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, NamingClass,
- NestedNameSpecifierLoc(), OpNameInfo,
- /*ADL*/ true, /*Overloaded*/ false,
- UnresolvedSetIterator(),
- UnresolvedSetIterator());
+ ExprResult Fn = CreateUnresolvedLookupExpr(
+ NamingClass, NestedNameSpecifierLoc(), OpNameInfo, UnresolvedSet<0>());
+ if (Fn.isInvalid())
+ return ExprError();
// Can't add any actual overloads yet
- return CXXOperatorCallExpr::Create(Context, OO_Subscript, Fn, Args,
+ return CXXOperatorCallExpr::Create(Context, OO_Subscript, Fn.get(), Args,
Context.DependentTy, VK_RValue, RLoc,
CurFPFeatureOverrides());
}
@@ -13932,11 +14148,11 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
/// parameter). The caller needs to validate that the member
/// expression refers to a non-static member function or an overloaded
/// member function.
-ExprResult
-Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
- SourceLocation LParenLoc,
- MultiExprArg Args,
- SourceLocation RParenLoc) {
+ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ bool AllowRecovery) {
assert(MemExprE->getType() == Context.BoundMemberTy ||
MemExprE->getType() == Context.OverloadTy);
@@ -13976,9 +14192,9 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
<< (qualsString.find(' ') == std::string::npos ? 1 : 2);
}
- CXXMemberCallExpr *call =
- CXXMemberCallExpr::Create(Context, MemExprE, Args, resultType,
- valueKind, RParenLoc, proto->getNumParams());
+ CXXMemberCallExpr *call = CXXMemberCallExpr::Create(
+ Context, MemExprE, Args, resultType, valueKind, RParenLoc,
+ CurFPFeatureOverrides(), proto->getNumParams());
if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getBeginLoc(),
call, nullptr))
@@ -13993,9 +14209,20 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
return MaybeBindToTemporary(call);
}
+ // We only try to build a recovery expr at this level if we can preserve
+ // the return type; otherwise we return ExprError() and let the caller
+ // recover.
+ auto BuildRecoveryExpr = [&](QualType Type) {
+ if (!AllowRecovery)
+ return ExprError();
+ std::vector<Expr *> SubExprs = {MemExprE};
+ llvm::for_each(Args, [&SubExprs](Expr *E) { SubExprs.push_back(E); });
+ return CreateRecoveryExpr(MemExprE->getBeginLoc(), RParenLoc, SubExprs,
+ Type);
+ };
if (isa<CXXPseudoDestructorExpr>(NakedMemExpr))
return CallExpr::Create(Context, MemExprE, Args, Context.VoidTy, VK_RValue,
- RParenLoc);
+ RParenLoc, CurFPFeatureOverrides());
UnbridgedCastsSet UnbridgedCasts;
if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts))
@@ -14067,6 +14294,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
UnbridgedCasts.restore();
OverloadCandidateSet::iterator Best;
+ bool Succeeded = false;
switch (CandidateSet.BestViableFunction(*this, UnresExpr->getBeginLoc(),
Best)) {
case OR_Success:
@@ -14074,7 +14302,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
FoundDecl = Best->FoundDecl;
CheckUnresolvedMemberAccess(UnresExpr, Best->FoundDecl);
if (DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc()))
- return ExprError();
+ break;
// If FoundDecl is different from Method (such as if one is a template
// and the other a specialization), make sure DiagnoseUseOfDecl is
// called on both.
@@ -14083,7 +14311,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// being used.
if (Method != FoundDecl.getDecl() &&
DiagnoseUseOfDecl(Method, UnresExpr->getNameLoc()))
- return ExprError();
+ break;
+ Succeeded = true;
break;
case OR_No_Viable_Function:
@@ -14093,27 +14322,25 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
PDiag(diag::err_ovl_no_viable_member_function_in_call)
<< DeclName << MemExprE->getSourceRange()),
*this, OCD_AllCandidates, Args);
- // FIXME: Leaking incoming expressions!
- return ExprError();
-
+ break;
case OR_Ambiguous:
CandidateSet.NoteCandidates(
PartialDiagnosticAt(UnresExpr->getMemberLoc(),
PDiag(diag::err_ovl_ambiguous_member_call)
<< DeclName << MemExprE->getSourceRange()),
*this, OCD_AmbiguousCandidates, Args);
- // FIXME: Leaking incoming expressions!
- return ExprError();
-
+ break;
case OR_Deleted:
CandidateSet.NoteCandidates(
PartialDiagnosticAt(UnresExpr->getMemberLoc(),
PDiag(diag::err_ovl_deleted_member_call)
<< DeclName << MemExprE->getSourceRange()),
*this, OCD_AllCandidates, Args);
- // FIXME: Leaking incoming expressions!
- return ExprError();
+ break;
}
+ // Overload resolution failed; try to recover.
+ if (!Succeeded)
+ return BuildRecoveryExpr(chooseRecoveryType(CandidateSet, &Best));
MemExprE = FixOverloadedFunctionReference(MemExprE, FoundDecl, Method);
@@ -14133,14 +14360,14 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
assert(Method && "Member call to something that isn't a method?");
const auto *Proto = Method->getType()->castAs<FunctionProtoType>();
- CXXMemberCallExpr *TheCall =
- CXXMemberCallExpr::Create(Context, MemExprE, Args, ResultType, VK,
- RParenLoc, Proto->getNumParams());
+ CXXMemberCallExpr *TheCall = CXXMemberCallExpr::Create(
+ Context, MemExprE, Args, ResultType, VK, RParenLoc,
+ CurFPFeatureOverrides(), Proto->getNumParams());
// Check for a valid return type.
if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
TheCall, Method))
- return ExprError();
+ return BuildRecoveryExpr(ResultType);
// Convert the object argument (for a non-static member function call).
// We only need to do this if there was actually an overload; otherwise
@@ -14157,7 +14384,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// Convert the rest of the arguments
if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args,
RParenLoc))
- return ExprError();
+ return BuildRecoveryExpr(ResultType);
DiagnoseSentinelCalls(Method, LParenLoc, Args);
@@ -14190,12 +14417,12 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
Diag(MemExpr->getBeginLoc(),
diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor)
<< MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
- << MD->getParent()->getDeclName();
+ << MD->getParent();
Diag(MD->getBeginLoc(), diag::note_previous_decl) << MD->getDeclName();
if (getLangOpts().AppleKext)
Diag(MemExpr->getBeginLoc(), diag::note_pure_qualified_call_kext)
- << MD->getParent()->getDeclName() << MD->getDeclName();
+ << MD->getParent() << MD->getDeclName();
}
}
@@ -14377,9 +14604,9 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (Call.isInvalid())
return ExprError();
// Record usage of conversion in an implicit cast.
- Call = ImplicitCastExpr::Create(Context, Call.get()->getType(),
- CK_UserDefinedConversion, Call.get(),
- nullptr, VK_RValue);
+ Call = ImplicitCastExpr::Create(
+ Context, Call.get()->getType(), CK_UserDefinedConversion, Call.get(),
+ nullptr, VK_RValue, CurFPFeatureOverrides());
return BuildCallExpr(S, Call.get(), LParenLoc, Args, RParenLoc);
}
@@ -14678,7 +14905,7 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
UserDefinedLiteral *UDL = UserDefinedLiteral::Create(
Context, Fn.get(), llvm::makeArrayRef(ConvArgs, Args.size()), ResultTy,
- VK, LitEndLoc, UDSuffixLoc);
+ VK, LitEndLoc, UDSuffixLoc, CurFPFeatureOverrides());
if (CheckCallReturnType(FD->getReturnType(), UDSuffixLoc, UDL, FD))
return ExprError();
@@ -14724,12 +14951,12 @@ Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
return FRS_DiagnosticIssued;
}
} else {
- UnresolvedSet<0> FoundNames;
- UnresolvedLookupExpr *Fn =
- UnresolvedLookupExpr::Create(Context, /*NamingClass=*/nullptr,
- NestedNameSpecifierLoc(), NameInfo,
- /*NeedsADL=*/true, /*Overloaded=*/false,
- FoundNames.begin(), FoundNames.end());
+ ExprResult FnR = CreateUnresolvedLookupExpr(/*NamingClass=*/nullptr,
+ NestedNameSpecifierLoc(),
+ NameInfo, UnresolvedSet<0>());
+ if (FnR.isInvalid())
+ return FRS_DiagnosticIssued;
+ UnresolvedLookupExpr *Fn = cast<UnresolvedLookupExpr>(FnR.get());
bool CandidateSetError = buildOverloadedCallSet(S, Fn, Fn, Range, Loc,
CandidateSet, CallExpr);
@@ -14784,10 +15011,9 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (SubExpr == ICE->getSubExpr())
return ICE;
- return ImplicitCastExpr::Create(Context, ICE->getType(),
- ICE->getCastKind(),
- SubExpr, nullptr,
- ICE->getValueKind());
+ return ImplicitCastExpr::Create(Context, ICE->getType(), ICE->getCastKind(),
+ SubExpr, nullptr, ICE->getValueKind(),
+ CurFPFeatureOverrides());
}
if (auto *GSE = dyn_cast<GenericSelectionExpr>(E)) {
diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp
index db7603b42f7b..af35052ee1e3 100644
--- a/clang/lib/Sema/SemaSYCL.cpp
+++ b/clang/lib/Sema/SemaSYCL.cpp
@@ -17,19 +17,19 @@ using namespace clang;
// SYCL device specific diagnostics implementation
// -----------------------------------------------------------------------------
-Sema::DeviceDiagBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
- unsigned DiagID) {
+Sema::SemaDiagnosticBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(getLangOpts().SYCLIsDevice &&
"Should only be called during SYCL compilation");
FunctionDecl *FD = dyn_cast<FunctionDecl>(getCurLexicalContext());
- DeviceDiagBuilder::Kind DiagKind = [this, FD] {
+ SemaDiagnosticBuilder::Kind DiagKind = [this, FD] {
if (!FD)
- return DeviceDiagBuilder::K_Nop;
+ return SemaDiagnosticBuilder::K_Nop;
if (getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted)
- return DeviceDiagBuilder::K_ImmediateWithCallStack;
- return DeviceDiagBuilder::K_Deferred;
+ return SemaDiagnosticBuilder::K_ImmediateWithCallStack;
+ return SemaDiagnosticBuilder::K_Deferred;
}();
- return DeviceDiagBuilder(DiagKind, Loc, DiagID, FD, *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, FD, *this);
}
bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
@@ -42,8 +42,8 @@ bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
if (isUnevaluatedContext() || isConstantEvaluated())
return true;
- DeviceDiagBuilder::Kind DiagKind = DeviceDiagBuilder::K_Nop;
+ SemaDiagnosticBuilder::Kind DiagKind = SemaDiagnosticBuilder::K_Nop;
- return DiagKind != DeviceDiagBuilder::K_Immediate &&
- DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
+ return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
+ DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
}
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index 73f3183c163f..b24a8ab110b2 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -385,6 +385,14 @@ void Sema::ActOnStartOfCompoundStmt(bool IsStmtExpr) {
PushCompoundScope(IsStmtExpr);
}
+void Sema::ActOnAfterCompoundStatementLeadingPragmas() {
+ if (getCurFPFeatures().isFPConstrained()) {
+ FunctionScopeInfo *FSI = getCurFunction();
+ assert(FSI);
+ FSI->setUsesFPIntrin();
+ }
+}
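// Illustrative sketch (assumed example): a constrained-FP pragma among the
// leading pragmas of a compound statement now marks the enclosing function
// scope via setUsesFPIntrin():
//   double f(double x) {
//   #pragma STDC FENV_ACCESS ON
//     return x + 1.0;  // lowered with constrained FP intrinsics
//   }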
+
void Sema::ActOnFinishOfCompoundStmt() {
PopCompoundScope();
}
@@ -397,11 +405,6 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr) {
const unsigned NumElts = Elts.size();
- // Mark the current function as using floating point constrained intrinsics
- if (getCurFPFeatures().isFPConstrained())
- if (FunctionDecl *F = dyn_cast<FunctionDecl>(CurContext))
- F->setUsesFPIntrin(true);
-
// If we're in C89 mode, check that we don't have any decls after stmts. If
// so, emit an extension diagnostic.
if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
@@ -467,7 +470,7 @@ Sema::ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val) {
ExprResult ER = E;
if (!E->isValueDependent())
- ER = VerifyIntegerConstantExpression(E);
+ ER = VerifyIntegerConstantExpression(E, AllowFold);
if (!ER.isInvalid())
ER = DefaultLvalueConversion(ER.get());
if (!ER.isInvalid())
@@ -574,11 +577,11 @@ public:
};
}
-StmtResult
-Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt,
- ConditionResult Cond,
- Stmt *thenStmt, SourceLocation ElseLoc,
- Stmt *elseStmt) {
+StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
+ SourceLocation LParenLoc, Stmt *InitStmt,
+ ConditionResult Cond, SourceLocation RParenLoc,
+ Stmt *thenStmt, SourceLocation ElseLoc,
+ Stmt *elseStmt) {
if (Cond.isInvalid())
Cond = ConditionResult(
*this, nullptr,
@@ -597,12 +600,40 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt,
DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), thenStmt,
diag::warn_empty_if_body);
- return BuildIfStmt(IfLoc, IsConstexpr, InitStmt, Cond, thenStmt, ElseLoc,
- elseStmt);
+ if (IsConstexpr) {
+ auto DiagnoseLikelihood = [&](const Stmt *S) {
+ if (const Attr *A = Stmt::getLikelihoodAttr(S)) {
+ Diags.Report(A->getLocation(),
+ diag::warn_attribute_has_no_effect_on_if_constexpr)
+ << A << A->getRange();
+ Diags.Report(IfLoc,
+ diag::note_attribute_has_no_effect_on_if_constexpr_here)
+ << SourceRange(IfLoc, LParenLoc.getLocWithOffset(-1));
+ }
+ };
+ DiagnoseLikelihood(thenStmt);
+ DiagnoseLikelihood(elseStmt);
+ } else {
+ std::tuple<bool, const Attr *, const Attr *> LHC =
+ Stmt::determineLikelihoodConflict(thenStmt, elseStmt);
+ if (std::get<0>(LHC)) {
+ const Attr *ThenAttr = std::get<1>(LHC);
+ const Attr *ElseAttr = std::get<2>(LHC);
+ Diags.Report(ThenAttr->getLocation(),
+ diag::warn_attributes_likelihood_ifstmt_conflict)
+ << ThenAttr << ThenAttr->getRange();
+ Diags.Report(ElseAttr->getLocation(), diag::note_conflicting_attribute)
+ << ElseAttr << ElseAttr->getRange();
+ }
+ }
+
+ return BuildIfStmt(IfLoc, IsConstexpr, LParenLoc, InitStmt, Cond, RParenLoc,
+ thenStmt, ElseLoc, elseStmt);
}
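// Illustrative sketch (assumed examples) of the new likelihood diagnostics:
//   if constexpr (Cond) [[likely]] {}  // warning: attribute has no effect
//                                      // on 'if constexpr'
//   if (b) [[likely]] {} else [[likely]] {}  // warning: conflicting
//                                            // likelihood attributes on
//                                            // both branches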
StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
- Stmt *InitStmt, ConditionResult Cond,
+ SourceLocation LParenLoc, Stmt *InitStmt,
+ ConditionResult Cond, SourceLocation RParenLoc,
Stmt *thenStmt, SourceLocation ElseLoc,
Stmt *elseStmt) {
if (Cond.isInvalid())
@@ -612,7 +643,8 @@ StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
setFunctionHasBranchProtectedScope();
return IfStmt::Create(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first,
- Cond.get().second, thenStmt, ElseLoc, elseStmt);
+ Cond.get().second, LParenLoc, RParenLoc, thenStmt,
+ ElseLoc, elseStmt);
}
namespace {
@@ -640,8 +672,7 @@ static bool CmpCaseVals(const std::pair<llvm::APSInt, CaseStmt*>& lhs,
return true;
if (lhs.first == rhs.first &&
- lhs.second->getCaseLoc().getRawEncoding()
- < rhs.second->getCaseLoc().getRawEncoding())
+ lhs.second->getCaseLoc() < rhs.second->getCaseLoc())
return true;
return false;
}
@@ -739,7 +770,9 @@ ExprResult Sema::CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond) {
}
StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
- Stmt *InitStmt, ConditionResult Cond) {
+ SourceLocation LParenLoc,
+ Stmt *InitStmt, ConditionResult Cond,
+ SourceLocation RParenLoc) {
Expr *CondExpr = Cond.get().second;
assert((Cond.isInvalid() || CondExpr) && "switch with no condition");
@@ -761,7 +794,8 @@ StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
setFunctionHasBranchIntoScope();
- auto *SS = SwitchStmt::Create(Context, InitStmt, Cond.get().first, CondExpr);
+ auto *SS = SwitchStmt::Create(Context, InitStmt, Cond.get().first, CondExpr,
+ LParenLoc, RParenLoc);
getCurFunction()->SwitchStack.push_back(
FunctionScopeInfo::SwitchInfo(SS, false));
return SS;
@@ -1244,10 +1278,10 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Produce a nice diagnostic if multiple values aren't handled.
if (!UnhandledNames.empty()) {
- DiagnosticBuilder DB = Diag(CondExpr->getExprLoc(),
- TheDefaultStmt ? diag::warn_def_missing_case
+ auto DB = Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_case
: diag::warn_missing_case)
- << (int)UnhandledNames.size();
+ << (int)UnhandledNames.size();
for (size_t I = 0, E = std::min(UnhandledNames.size(), (size_t)3);
I != E; ++I)
@@ -1787,15 +1821,27 @@ StmtResult Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
// C99 6.8.5p3: The declaration part of a 'for' statement shall only
// declare identifiers for objects having storage class 'auto' or
// 'register'.
+ const Decl *NonVarSeen = nullptr;
+ bool VarDeclSeen = false;
for (auto *DI : DS->decls()) {
- VarDecl *VD = dyn_cast<VarDecl>(DI);
- if (VD && VD->isLocalVarDecl() && !VD->hasLocalStorage())
- VD = nullptr;
- if (!VD) {
- Diag(DI->getLocation(), diag::err_non_local_variable_decl_in_for);
- DI->setInvalidDecl();
+ if (VarDecl *VD = dyn_cast<VarDecl>(DI)) {
+ VarDeclSeen = true;
+ if (VD->isLocalVarDecl() && !VD->hasLocalStorage()) {
+ Diag(DI->getLocation(), diag::err_non_local_variable_decl_in_for);
+ DI->setInvalidDecl();
+ }
+ } else if (!NonVarSeen) {
+ // Keep track of the first non-variable declaration we saw so that
+ // we can diagnose if we don't see any variable declarations. This
+ // covers a case like declaring a typedef, function, or structure
+ // type rather than a variable.
+ NonVarSeen = DI;
}
}
+ // Diagnose if we saw a non-variable declaration but no variable
+ // declarations.
+ if (NonVarSeen && !VarDeclSeen)
+ Diag(NonVarSeen->getLocation(), diag::err_non_variable_decl_in_for);
}
}
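// Illustrative sketch (assumed examples) of the refined 'for'-init checks:
//   for (typedef int T;;) {}     // error: non-variable declaration in 'for'
//   for (static int i = 0;;) {}  // error: declaration of non-local variable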
@@ -3039,12 +3085,13 @@ bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
// variable will no longer be used.
if (VD->hasAttr<BlocksAttr>()) return false;
+ // ...non-volatile...
+ if (VD->getType().isVolatileQualified())
+ return false;
+
if (CESK & CES_AllowDifferentTypes)
return true;
- // ...non-volatile...
- if (VD->getType().isVolatileQualified()) return false;
-
// Variables with higher required alignment than their type's ABI
// alignment cannot use NRVO.
if (!VD->getType()->isDependentType() && VD->hasAttr<AlignedAttr>() &&
@@ -3070,15 +3117,18 @@ bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
/// If move-initialization is not possible, such that we must fall back to
/// treating the operand as an lvalue, we will leave Res in its original
/// invalid state.
-static void TryMoveInitialization(Sema& S,
- const InitializedEntity &Entity,
+///
+/// \returns Whether a second overload resolution is needed: true if the
+/// first overload resolution fails, or if it succeeds but the selected
+/// constructor/operator does not satisfy the additional criteria.
+static bool TryMoveInitialization(Sema &S, const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
- QualType ResultType,
- Expr *&Value,
+ QualType ResultType, Expr *&Value,
bool ConvertingConstructorsOnly,
- ExprResult &Res) {
+ bool IsDiagnosticsCheck, ExprResult &Res) {
ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
- CK_NoOp, Value, VK_XValue);
+ CK_NoOp, Value, VK_XValue, FPOptionsOverride());
Expr *InitExpr = &AsRvalue;
@@ -3087,8 +3137,11 @@ static void TryMoveInitialization(Sema& S,
InitializationSequence Seq(S, Entity, Kind, InitExpr);
- if (!Seq)
- return;
+ bool NeedSecondOverloadResolution = true;
+ if (!Seq &&
+ (IsDiagnosticsCheck || Seq.getFailedOverloadResult() != OR_Deleted)) {
+ return NeedSecondOverloadResolution;
+ }
for (const InitializationSequence::Step &Step : Seq.steps()) {
if (Step.Kind != InitializationSequence::SK_ConstructorInitialization &&
@@ -3131,15 +3184,19 @@ static void TryMoveInitialization(Sema& S,
}
}
+ NeedSecondOverloadResolution = false;
// Promote "AsRvalue" to the heap, since we now need this
// expression node to persist.
- Value = ImplicitCastExpr::Create(S.Context, Value->getType(), CK_NoOp,
- Value, nullptr, VK_XValue);
+ Value =
+ ImplicitCastExpr::Create(S.Context, Value->getType(), CK_NoOp, Value,
+ nullptr, VK_XValue, FPOptionsOverride());
// Complete type-checking the initialization of the return type
// using the constructor we found.
Res = Seq.Perform(S, Entity, Kind, Value);
}
+
+ return NeedSecondOverloadResolution;
}
/// Perform the initialization of a potentially-movable value, which
@@ -3164,6 +3221,7 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
// select the constructor for the copy is first performed as if the object
// were designated by an rvalue.
ExprResult Res = ExprError();
+ bool NeedSecondOverloadResolution = true;
if (AllowNRVO) {
bool AffectedByCWG1579 = false;
@@ -3180,15 +3238,14 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
}
if (NRVOCandidate) {
- TryMoveInitialization(*this, Entity, NRVOCandidate, ResultType, Value,
- true, Res);
+ NeedSecondOverloadResolution = TryMoveInitialization(
+ *this, Entity, NRVOCandidate, ResultType, Value, true, false, Res);
}
- if (!Res.isInvalid() && AffectedByCWG1579) {
+ if (!NeedSecondOverloadResolution && AffectedByCWG1579) {
QualType QT = NRVOCandidate->getType();
- if (QT.getNonReferenceType()
- .getUnqualifiedType()
- .isTriviallyCopyableType(Context)) {
+ if (QT.getNonReferenceType().getUnqualifiedType().isTriviallyCopyableType(
+ Context)) {
// Adding 'std::move' around a trivially copyable variable is probably
// pointless. Don't suggest it.
} else {
@@ -3202,12 +3259,12 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
Str += NRVOCandidate->getDeclName().getAsString();
Str += ")";
Diag(Value->getExprLoc(), diag::warn_return_std_move_in_cxx11)
- << Value->getSourceRange()
- << NRVOCandidate->getDeclName() << ResultType << QT;
+ << Value->getSourceRange() << NRVOCandidate->getDeclName()
+ << ResultType << QT;
Diag(Value->getExprLoc(), diag::note_add_std_move_in_cxx11)
<< FixItHint::CreateReplacement(Value->getSourceRange(), Str);
}
- } else if (Res.isInvalid() &&
+ } else if (NeedSecondOverloadResolution &&
!getDiagnostics().isIgnored(diag::warn_return_std_move,
Value->getExprLoc())) {
const VarDecl *FakeNRVOCandidate =
@@ -3226,7 +3283,7 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
ExprResult FakeRes = ExprError();
Expr *FakeValue = Value;
TryMoveInitialization(*this, Entity, FakeNRVOCandidate, ResultType,
- FakeValue, false, FakeRes);
+ FakeValue, false, true, FakeRes);
if (!FakeRes.isInvalid()) {
bool IsThrow =
(Entity.getKind() == InitializedEntity::EK_Exception);
@@ -3248,7 +3305,7 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
// Either we didn't meet the criteria for treating an lvalue as an rvalue,
// above, or overload resolution failed. Either way, we need to try
// (again) now with the return value expression as written.
- if (Res.isInvalid())
+ if (NeedSecondOverloadResolution)
Res = PerformCopyInitialization(Entity, SourceLocation(), Value);
return Res;
@@ -3290,9 +3347,14 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
if (HasDeducedReturnType) {
+ FunctionDecl *FD = CurLambda->CallOperator;
+ // If we've already decided this lambda is invalid, e.g. because
+ // we saw a `return` whose expression had an error, don't keep
+ // trying to deduce its return type.
+ if (FD->isInvalidDecl())
+ return StmtError();
// In C++1y, the return type may involve 'auto'.
// FIXME: Blocks might have a return type of 'auto' explicitly specified.
- FunctionDecl *FD = CurLambda->CallOperator;
if (CurCap->ReturnType.isNull())
CurCap->ReturnType = FD->getReturnType();
@@ -3587,7 +3649,8 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope) {
// Correct typos, in case the containing function returns 'auto' and
// RetValExp should determine the deduced type.
- ExprResult RetVal = CorrectDelayedTyposInExpr(RetValExp);
+ ExprResult RetVal = CorrectDelayedTyposInExpr(
+ RetValExp, nullptr, /*RecoverUncorrectedTypos=*/true);
if (RetVal.isInvalid())
return StmtError();
StmtResult R = BuildReturnStmt(ReturnLoc, RetVal.get());
@@ -3625,12 +3688,11 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (FD->hasAttrs())
Attrs = &FD->getAttrs();
if (FD->isNoReturn())
- Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
- << FD->getDeclName();
+ Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr) << FD;
if (FD->isMain() && RetValExp)
if (isa<CXXBoolLiteralExpr>(RetValExp))
Diag(ReturnLoc, diag::warn_main_returns_bool_literal)
- << RetValExp->getSourceRange();
+ << RetValExp->getSourceRange();
if (FD->hasAttr<CmseNSEntryAttr>() && RetValExp) {
if (const auto *RT = dyn_cast<RecordType>(FnRetType.getCanonicalType())) {
if (RT->getDecl()->isOrContainsUnion())
@@ -3673,6 +3735,11 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (getLangOpts().CPlusPlus14) {
if (AutoType *AT = FnRetType->getContainedAutoType()) {
FunctionDecl *FD = cast<FunctionDecl>(CurContext);
+ // If we've already decided this function is invalid, e.g. because
+ // we saw a `return` whose expression had an error, don't keep
+ // trying to deduce its return type.
+ if (FD->isInvalidDecl())
+ return StmtError();
if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
FD->setInvalidDecl();
return StmtError();
@@ -3701,8 +3768,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
FunctionKind = 3;
Diag(ReturnLoc, diag::err_return_init_list)
- << CurDecl->getDeclName() << FunctionKind
- << RetValExp->getSourceRange();
+ << CurDecl << FunctionKind << RetValExp->getSourceRange();
// Drop the expression.
RetValExp = nullptr;
@@ -3729,9 +3795,8 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// return of void in constructor/destructor is illegal in C++.
if (D == diag::err_ctor_dtor_returns_void) {
NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
- Diag(ReturnLoc, D)
- << CurDecl->getDeclName() << isa<CXXDestructorDecl>(CurDecl)
- << RetValExp->getSourceRange();
+ Diag(ReturnLoc, D) << CurDecl << isa<CXXDestructorDecl>(CurDecl)
+ << RetValExp->getSourceRange();
}
// return (some void expression); is legal in C++.
else if (D != diag::ext_return_has_void_expr ||
@@ -3747,8 +3812,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
FunctionKind = 3;
Diag(ReturnLoc, D)
- << CurDecl->getDeclName() << FunctionKind
- << RetValExp->getSourceRange();
+ << CurDecl << FunctionKind << RetValExp->getSourceRange();
}
}
@@ -3766,25 +3830,26 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
} else if (!RetValExp && !HasDependentReturnType) {
FunctionDecl *FD = getCurFunctionDecl();
- unsigned DiagID;
if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
// C++11 [stmt.return]p2
- DiagID = diag::err_constexpr_return_missing_expr;
+ Diag(ReturnLoc, diag::err_constexpr_return_missing_expr)
+ << FD << FD->isConsteval();
FD->setInvalidDecl();
- } else if (getLangOpts().C99) {
- // C99 6.8.6.4p1 (ext_ since GCC warns)
- DiagID = diag::ext_return_missing_expr;
} else {
+ // C99 6.8.6.4p1 (ext_ since GCC warns)
// C90 6.6.6.4p4
- DiagID = diag::warn_return_missing_expr;
+ unsigned DiagID = getLangOpts().C99 ? diag::ext_return_missing_expr
+ : diag::warn_return_missing_expr;
+ // Note that at this point one of getCurFunctionDecl() or
+ // getCurMethodDecl() must be non-null (see above).
+ assert((getCurFunctionDecl() || getCurMethodDecl()) &&
+ "Not in a FunctionDecl or ObjCMethodDecl?");
+ bool IsMethod = FD == nullptr;
+ const NamedDecl *ND =
+ IsMethod ? cast<NamedDecl>(getCurMethodDecl()) : cast<NamedDecl>(FD);
+ Diag(ReturnLoc, DiagID) << ND << IsMethod;
}
- if (FD)
- Diag(ReturnLoc, DiagID)
- << FD->getIdentifier() << 0 /*fn*/ << FD->isConsteval();
- else
- Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
-
Result = ReturnStmt::Create(Context, ReturnLoc, /* RetExpr=*/nullptr,
/* NRVOCandidate=*/nullptr);
} else {
diff --git a/clang/lib/Sema/SemaStmtAsm.cpp b/clang/lib/Sema/SemaStmtAsm.cpp
index 10fa24682f9c..3b631bf747c6 100644
--- a/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/clang/lib/Sema/SemaStmtAsm.cpp
@@ -448,9 +448,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
unsigned Size = Context.getTypeSize(Ty);
if (!Context.getTargetInfo().validateInputSize(FeatureMap,
Literal->getString(), Size))
- return StmtResult(
- targetDiag(InputExpr->getBeginLoc(), diag::err_asm_invalid_input_size)
- << Info.getConstraintStr());
+ return targetDiag(InputExpr->getBeginLoc(),
+ diag::err_asm_invalid_input_size)
+ << Info.getConstraintStr();
}
// Check that the clobbers are valid.
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index e9d3c755eb23..8031aa6b0ece 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -10,13 +10,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/EvaluatedExprVisitor.h"
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
@@ -139,10 +140,18 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
LoopHintAttr::PipelineInitiationInterval)
.Case("distribute", LoopHintAttr::Distribute)
.Default(LoopHintAttr::Vectorize);
- if (Option == LoopHintAttr::VectorizeWidth ||
- Option == LoopHintAttr::InterleaveCount ||
- Option == LoopHintAttr::UnrollCount ||
- Option == LoopHintAttr::PipelineInitiationInterval) {
+ if (Option == LoopHintAttr::VectorizeWidth) {
+ assert((ValueExpr || (StateLoc && StateLoc->Ident)) &&
+ "Attribute must have a valid value expression or argument.");
+ if (ValueExpr && S.CheckLoopHintExpr(ValueExpr, St->getBeginLoc()))
+ return nullptr;
+ if (StateLoc && StateLoc->Ident && StateLoc->Ident->isStr("scalable"))
+ State = LoopHintAttr::ScalableWidth;
+ else
+ State = LoopHintAttr::FixedWidth;
+ } else if (Option == LoopHintAttr::InterleaveCount ||
+ Option == LoopHintAttr::UnrollCount ||
+ Option == LoopHintAttr::PipelineInitiationInterval) {
assert(ValueExpr && "Attribute must have a valid value expression.");
if (S.CheckLoopHintExpr(ValueExpr, St->getBeginLoc()))
return nullptr;
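// Illustrative sketch (assumed examples): vectorize_width now takes an
// optional second argument selecting fixed or scalable vectorization:
//   #pragma clang loop vectorize_width(4, fixed)
//   #pragma clang loop vectorize_width(4, scalable)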
@@ -183,6 +192,7 @@ public:
bool foundCallExpr() { return FoundCallExpr; }
void VisitCallExpr(const CallExpr *E) { FoundCallExpr = true; }
+ void VisitAsmStmt(const AsmStmt *S) { FoundCallExpr = true; }
void Visit(const Stmt *St) {
if (!St)
@@ -209,6 +219,24 @@ static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return ::new (S.Context) NoMergeAttr(S.Context, A);
}
+static Attr *handleLikely(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+
+ if (!S.getLangOpts().CPlusPlus20 && A.isCXX11Attribute() && !A.getScopeName())
+ S.Diag(A.getLoc(), diag::ext_cxx20_attr) << A << Range;
+
+ return ::new (S.Context) LikelyAttr(S.Context, A);
+}
+
+static Attr *handleUnlikely(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+
+ if (!S.getLangOpts().CPlusPlus20 && A.isCXX11Attribute() && !A.getScopeName())
+ S.Diag(A.getLoc(), diag::ext_cxx20_attr) << A << Range;
+
+ return ::new (S.Context) UnlikelyAttr(S.Context, A);
+}
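// Illustrative sketch (assumed example): outside C++20 mode, the standard
// spelling is accepted as an extension with a warning:
//   if (x) [[likely]] return;  // C++17: warned as a C++20 extension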
+
static void
CheckForIncompatibleAttributes(Sema &S,
const SmallVectorImpl<const Attr *> &Attrs) {
@@ -314,6 +342,32 @@ CheckForIncompatibleAttributes(Sema &S,
<< CategoryState.NumericAttr->getDiagnosticName(Policy);
}
}
+
+ // C++20 [dcl.attr.likelihood]p1 The attribute-token likely shall not appear
+ // in an attribute-specifier-seq that contains the attribute-token unlikely.
+ const LikelyAttr *Likely = nullptr;
+ const UnlikelyAttr *Unlikely = nullptr;
+ for (const auto *I : Attrs) {
+ if (const auto *Attr = dyn_cast<LikelyAttr>(I)) {
+ if (Unlikely) {
+ S.Diag(Attr->getLocation(), diag::err_attributes_are_not_compatible)
+ << Attr << Unlikely << Attr->getRange();
+ S.Diag(Unlikely->getLocation(), diag::note_conflicting_attribute)
+ << Unlikely->getRange();
+ return;
+ }
+ Likely = Attr;
+ } else if (const auto *Attr = dyn_cast<UnlikelyAttr>(I)) {
+ if (Likely) {
+ S.Diag(Attr->getLocation(), diag::err_attributes_are_not_compatible)
+ << Attr << Likely << Attr->getRange();
+ S.Diag(Likely->getLocation(), diag::note_conflicting_attribute)
+ << Likely->getRange();
+ return;
+ }
+ Unlikely = Attr;
+ }
+ }
}
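// Illustrative sketch (assumed example): likely and unlikely may not appear
// in the same attribute-specifier-seq:
//   switch (i) { [[likely]] [[unlikely]] case 0: break; }  // error:
//       // attributes are not compatible, with a note on the first one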
static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
@@ -335,15 +389,15 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
if (NumArgs == 1) {
Expr *E = A.getArgAsExpr(0);
- llvm::APSInt ArgVal(32);
+ Optional<llvm::APSInt> ArgVal;
- if (!E->isIntegerConstantExpr(ArgVal, S.Context)) {
+ if (!(ArgVal = E->getIntegerConstantExpr(S.Context))) {
S.Diag(A.getLoc(), diag::err_attribute_argument_type)
<< A << AANT_ArgumentIntegerConstant << E->getSourceRange();
return nullptr;
}
- int Val = ArgVal.getSExtValue();
+ int Val = ArgVal->getSExtValue();
if (Val <= 0) {
S.Diag(A.getRange().getBegin(),
@@ -364,7 +418,7 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
S.Diag(A.getLoc(), A.isDeclspecAttribute()
? (unsigned)diag::warn_unhandled_ms_attribute_ignored
: (unsigned)diag::warn_unknown_attribute_ignored)
- << A;
+ << A << A.getRange();
return nullptr;
case ParsedAttr::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
@@ -376,6 +430,10 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return handleSuppressAttr(S, St, A, Range);
case ParsedAttr::AT_NoMerge:
return handleNoMergeAttr(S, St, A, Range);
+ case ParsedAttr::AT_Likely:
+ return handleLikely(S, St, A, Range);
+ case ParsedAttr::AT_Unlikely:
+ return handleUnlikely(S, St, A, Range);
default:
// if we're here, then we parsed a known attribute, but didn't recognize
// it as a statement attribute => it is declaration attribute
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index c05ed0b14e3e..12880b95b9c6 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -23,6 +23,7 @@
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -938,11 +939,10 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
TArg = TemplateArgument(Template, Optional<unsigned int>());
else
TArg = Template;
- return TemplateArgumentLoc(TArg,
- Arg.getScopeSpec().getWithLocInContext(
- SemaRef.Context),
- Arg.getLocation(),
- Arg.getEllipsisLoc());
+ return TemplateArgumentLoc(
+ SemaRef.Context, TArg,
+ Arg.getScopeSpec().getWithLocInContext(SemaRef.Context),
+ Arg.getLocation(), Arg.getEllipsisLoc());
}
}
@@ -1176,7 +1176,11 @@ static ExprResult formImmediatelyDeclaredConstraint(
// template<C1... T> struct s1;
//
// The constraint: (C1<T> && ...)
- return S.BuildCXXFoldExpr(/*LParenLoc=*/SourceLocation(),
+ //
+ // Note that the type of C1<T> is known to be 'bool', so we don't need to do
+ // any unqualified lookups for 'operator&&' here.
+ return S.BuildCXXFoldExpr(/*UnqualifiedLookup=*/nullptr,
+ /*LParenLoc=*/SourceLocation(),
ImmediatelyDeclaredConstraint.get(), BO_LAnd,
EllipsisLoc, /*RHS=*/nullptr,
/*RParenLoc=*/SourceLocation(),
@@ -1274,6 +1278,108 @@ QualType Sema::CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
return CheckNonTypeTemplateParameterType(TSI->getType(), Loc);
}
+/// Require the given type to be a structural type, and diagnose if it is not.
+///
+/// \return \c true if an error was produced.
+bool Sema::RequireStructuralType(QualType T, SourceLocation Loc) {
+ if (T->isDependentType())
+ return false;
+
+ if (RequireCompleteType(Loc, T, diag::err_template_nontype_parm_incomplete))
+ return true;
+
+ if (T->isStructuralType())
+ return false;
+
+ // Structural types are required to be object types or lvalue references.
+ if (T->isRValueReferenceType()) {
+ Diag(Loc, diag::err_template_nontype_parm_rvalue_ref) << T;
+ return true;
+ }
+
+ // Don't mention structural types in our diagnostic prior to C++20. Also,
+ // there's not much more we can say about non-scalar non-class types --
+ // because we can't see functions or arrays here, those can only be language
+ // extensions.
+ if (!getLangOpts().CPlusPlus20 ||
+ (!T->isScalarType() && !T->isRecordType())) {
+ Diag(Loc, diag::err_template_nontype_parm_bad_type) << T;
+ return true;
+ }
+
+ // Structural types are required to be literal types.
+ if (RequireLiteralType(Loc, T, diag::err_template_nontype_parm_not_literal))
+ return true;
+
+ Diag(Loc, diag::err_template_nontype_parm_not_structural) << T;
+
+ // Drill down into the reason why the class is non-structural.
+ while (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
+ // All members are required to be public and non-mutable, and can't be of
+ // rvalue reference type. Check these conditions first to prefer a "local"
+ // reason over a more distant one.
+ for (const FieldDecl *FD : RD->fields()) {
+ if (FD->getAccess() != AS_public) {
+ Diag(FD->getLocation(), diag::note_not_structural_non_public) << T << 0;
+ return true;
+ }
+ if (FD->isMutable()) {
+ Diag(FD->getLocation(), diag::note_not_structural_mutable_field) << T;
+ return true;
+ }
+ if (FD->getType()->isRValueReferenceType()) {
+ Diag(FD->getLocation(), diag::note_not_structural_rvalue_ref_field)
+ << T;
+ return true;
+ }
+ }
+
+ // All bases are required to be public.
+ for (const auto &BaseSpec : RD->bases()) {
+ if (BaseSpec.getAccessSpecifier() != AS_public) {
+ Diag(BaseSpec.getBaseTypeLoc(), diag::note_not_structural_non_public)
+ << T << 1;
+ return true;
+ }
+ }
+
+ // All subobjects are required to be of structural types.
+ SourceLocation SubLoc;
+ QualType SubType;
+ int Kind = -1;
+
+ for (const FieldDecl *FD : RD->fields()) {
+ QualType T = Context.getBaseElementType(FD->getType());
+ if (!T->isStructuralType()) {
+ SubLoc = FD->getLocation();
+ SubType = T;
+ Kind = 0;
+ break;
+ }
+ }
+
+ if (Kind == -1) {
+ for (const auto &BaseSpec : RD->bases()) {
+ QualType T = BaseSpec.getType();
+ if (!T->isStructuralType()) {
+ SubLoc = BaseSpec.getBaseTypeLoc();
+ SubType = T;
+ Kind = 1;
+ break;
+ }
+ }
+ }
+
+ assert(Kind != -1 && "couldn't find reason why type is not structural");
+ Diag(SubLoc, diag::note_not_structural_subobject)
+ << T << Kind << SubType;
+ T = SubType;
+ RD = T->getAsCXXRecordDecl();
+ }
+
+ return true;
+}
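// Illustrative sketch (assumed examples) of the C++20 structural-type rules
// this enforces for non-type template parameters:
//   struct A { int n; };          // structural: public, non-mutable member
//   template <A a> int f();       // OK in C++20
//   struct B { mutable int n; };  // not structural: mutable member
//   template <B b> int g();       // error, plus a note pointing at B::n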
+
QualType Sema::CheckNonTypeTemplateParameterType(QualType T,
SourceLocation Loc) {
// We don't allow variably-modified types as the type of non-type template
@@ -1293,13 +1399,13 @@ QualType Sema::CheckNonTypeTemplateParameterType(QualType T,
if (T->isIntegralOrEnumerationType() ||
// -- pointer to object or pointer to function,
T->isPointerType() ||
- // -- reference to object or reference to function,
- T->isReferenceType() ||
+ // -- lvalue reference to object or lvalue reference to function,
+ T->isLValueReferenceType() ||
// -- pointer to member,
T->isMemberPointerType() ||
- // -- std::nullptr_t.
+ // -- std::nullptr_t, or
T->isNullPtrType() ||
- // Allow use of auto in template parameter declarations.
+ // -- a type that contains a placeholder type.
T->isUndeducedType()) {
// C++ [temp.param]p5: The top-level cv-qualifiers on the template-parameter
// are ignored when determining its type.
@@ -1323,10 +1429,21 @@ QualType Sema::CheckNonTypeTemplateParameterType(QualType T,
if (T->isDependentType())
return T.getUnqualifiedType();
- Diag(Loc, diag::err_template_nontype_parm_bad_type)
- << T;
+ // C++20 [temp.param]p6:
+ // -- a structural type
+ if (RequireStructuralType(T, Loc))
+ return QualType();
+
+ if (!getLangOpts().CPlusPlus20) {
+ // FIXME: Consider allowing structural types as an extension in C++17. (In
+ // earlier language modes, the template argument evaluation rules are too
+ // inflexible.)
+ Diag(Loc, diag::err_template_nontype_parm_bad_structural_type) << T;
+ return QualType();
+ }
- return QualType();
+ Diag(Loc, diag::warn_cxx17_compat_template_nontype_parm_type) << T;
+ return T.getUnqualifiedType();
}
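
A short illustration of the isUndeducedType() case above, "a type that contains a placeholder type" (valid since C++17):

    template<auto N> struct Constant {};
    Constant<42> ci;     // N deduced as int
    Constant<'x'> cc;    // N deduced as char
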
NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
@@ -1960,27 +2077,27 @@ public:
QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
ASTContext &Context = SemaRef.getASTContext();
TypedefNameDecl *OrigDecl = TL.getTypedefNameDecl();
- TypeLocBuilder InnerTLB;
- QualType Transformed =
- TransformType(InnerTLB, OrigDecl->getTypeSourceInfo()->getTypeLoc());
- TypeSourceInfo *TSI =
- TransformType(InnerTLB.getTypeSourceInfo(Context, Transformed));
-
- TypedefNameDecl *Decl = nullptr;
-
- if (isa<TypeAliasDecl>(OrigDecl))
- Decl = TypeAliasDecl::Create(
- Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
- OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
- else {
- assert(isa<TypedefDecl>(OrigDecl) && "Not a Type alias or typedef");
- Decl = TypedefDecl::Create(
- Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
- OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ TypedefNameDecl *Decl = OrigDecl;
+ // Transform the underlying type of the typedef and clone the Decl only if
+ // the typedef has a dependent context.
+ if (OrigDecl->getDeclContext()->isDependentContext()) {
+ TypeLocBuilder InnerTLB;
+ QualType Transformed =
+ TransformType(InnerTLB, OrigDecl->getTypeSourceInfo()->getTypeLoc());
+ TypeSourceInfo *TSI = InnerTLB.getTypeSourceInfo(Context, Transformed);
+ if (isa<TypeAliasDecl>(OrigDecl))
+ Decl = TypeAliasDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ else {
+ assert(isa<TypedefDecl>(OrigDecl) && "Not a Type alias or typedef");
+ Decl = TypedefDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ }
+ MaterializedTypedefs.push_back(Decl);
}
- MaterializedTypedefs.push_back(Decl);
-
QualType TDTy = Context.getTypedefType(Decl);
TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(TDTy);
TypedefTL.setNameLoc(TL.getNameLoc());
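
A sketch of the case this transform now distinguishes when synthesizing implicit deduction guides (illustrative, with names invented for the example): a member typedef sits in a dependent context and must be cloned, whereas a namespace-scope typedef can be reused as-is.

    template<typename T> struct Box {
      using pointer = T*;    // dependent context: cloned while building the guide
      Box(pointer) {}
    };
    int n = 0;
    Box b(&n);               // CTAD deduces Box<int> through the transformed guide
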
@@ -3544,7 +3661,6 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
QualType CanonType;
- bool InstantiationDependent = false;
if (TypeAliasTemplateDecl *AliasTemplate =
dyn_cast<TypeAliasTemplateDecl>(Template)) {
@@ -3607,7 +3723,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
}
} else if (Name.isDependent() ||
TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs, InstantiationDependent)) {
+ TemplateArgs, Converted)) {
// This class template specialization is a dependent
// type. Therefore, its canonical type is another class template
// specialization type that contains all of the converted
@@ -3675,11 +3791,15 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext());
}
- if (Decl->getSpecializationKind() == TSK_Undeclared) {
- MultiLevelTemplateArgumentList TemplateArgLists;
- TemplateArgLists.addOuterTemplateArguments(Converted);
- InstantiateAttrsForDecl(TemplateArgLists, ClassTemplate->getTemplatedDecl(),
- Decl);
+ if (Decl->getSpecializationKind() == TSK_Undeclared &&
+ ClassTemplate->getTemplatedDecl()->hasAttrs()) {
+ InstantiatingTemplate Inst(*this, TemplateLoc, Decl);
+ if (!Inst.isInvalid()) {
+ MultiLevelTemplateArgumentList TemplateArgLists;
+ TemplateArgLists.addOuterTemplateArguments(Converted);
+ InstantiateAttrsForDecl(TemplateArgLists,
+ ClassTemplate->getTemplatedDecl(), Decl);
+ }
}
// Diagnose uses of this specialization.
@@ -4194,11 +4314,9 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
// FIXME: Move these checks to CheckTemplatePartialSpecializationArgs so we
// also do them during instantiation.
- bool InstantiationDependent;
if (!Name.isDependent() &&
- !TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs.arguments(),
- InstantiationDependent)) {
+ !TemplateSpecializationType::anyDependentTemplateArguments(TemplateArgs,
+ Converted)) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< VarTemplate->getDeclName();
IsPartialSpecialization = false;
@@ -4358,6 +4476,12 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
Converted, /*UpdateArgsWithConversion=*/true))
return true;
+ // Produce a placeholder value if the specialization is dependent.
+ if (Template->getDeclContext()->isDependentContext() ||
+ TemplateSpecializationType::anyDependentTemplateArguments(TemplateArgs,
+ Converted))
+ return DeclResult();
+
// Find the variable template specialization declaration that
// corresponds to these arguments.
void *InsertPos = nullptr;
@@ -4385,84 +4509,75 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
// 1. Attempt to find the closest partial specialization that this
// specializes, if any.
- // If any of the template arguments is dependent, then this is probably
- // a placeholder for an incomplete declarative context; which must be
- // complete by instantiation time. Thus, do not search through the partial
- // specializations yet.
// TODO: Unify with InstantiateClassTemplateSpecialization()?
// Perhaps better after unification of DeduceTemplateArguments() and
// getMoreSpecializedPartialSpecialization().
- bool InstantiationDependent = false;
- if (!TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs, InstantiationDependent)) {
+ SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ Template->getPartialSpecializations(PartialSpecs);
- SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
- Template->getPartialSpecializations(PartialSpecs);
-
- for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
- VarTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
- TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
+ VarTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
- if (TemplateDeductionResult Result =
- DeduceTemplateArguments(Partial, TemplateArgList, Info)) {
- // Store the failed-deduction information for use in diagnostics, later.
- // TODO: Actually use the failed-deduction info?
- FailedCandidates.addCandidate().set(
- DeclAccessPair::make(Template, AS_public), Partial,
- MakeDeductionFailureInfo(Context, Result, Info));
- (void)Result;
- } else {
- Matched.push_back(PartialSpecMatchResult());
- Matched.back().Partial = Partial;
- Matched.back().Args = Info.take();
- }
+ if (TemplateDeductionResult Result =
+ DeduceTemplateArguments(Partial, TemplateArgList, Info)) {
+ // Store the failed-deduction information for use in diagnostics, later.
+ // TODO: Actually use the failed-deduction info?
+ FailedCandidates.addCandidate().set(
+ DeclAccessPair::make(Template, AS_public), Partial,
+ MakeDeductionFailureInfo(Context, Result, Info));
+ (void)Result;
+ } else {
+ Matched.push_back(PartialSpecMatchResult());
+ Matched.back().Partial = Partial;
+ Matched.back().Args = Info.take();
}
+ }
- if (Matched.size() >= 1) {
- SmallVector<MatchResult, 4>::iterator Best = Matched.begin();
- if (Matched.size() == 1) {
- // -- If exactly one matching specialization is found, the
- // instantiation is generated from that specialization.
- // We don't need to do anything for this.
- } else {
- // -- If more than one matching specialization is found, the
- // partial order rules (14.5.4.2) are used to determine
- // whether one of the specializations is more specialized
- // than the others. If none of the specializations is more
- // specialized than all of the other matching
- // specializations, then the use of the variable template is
- // ambiguous and the program is ill-formed.
- for (SmallVector<MatchResult, 4>::iterator P = Best + 1,
- PEnd = Matched.end();
- P != PEnd; ++P) {
- if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
- PointOfInstantiation) ==
- P->Partial)
- Best = P;
- }
+ if (Matched.size() >= 1) {
+ SmallVector<MatchResult, 4>::iterator Best = Matched.begin();
+ if (Matched.size() == 1) {
+ // -- If exactly one matching specialization is found, the
+ // instantiation is generated from that specialization.
+ // We don't need to do anything for this.
+ } else {
+ // -- If more than one matching specialization is found, the
+ // partial order rules (14.5.4.2) are used to determine
+ // whether one of the specializations is more specialized
+ // than the others. If none of the specializations is more
+ // specialized than all of the other matching
+ // specializations, then the use of the variable template is
+ // ambiguous and the program is ill-formed.
+ for (SmallVector<MatchResult, 4>::iterator P = Best + 1,
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
+ PointOfInstantiation) ==
+ P->Partial)
+ Best = P;
+ }
- // Determine if the best partial specialization is more specialized than
- // the others.
- for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
- PEnd = Matched.end();
- P != PEnd; ++P) {
- if (P != Best && getMoreSpecializedPartialSpecialization(
- P->Partial, Best->Partial,
- PointOfInstantiation) != Best->Partial) {
- AmbiguousPartialSpec = true;
- break;
- }
+ // Determine if the best partial specialization is more specialized than
+ // the others.
+ for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (P != Best && getMoreSpecializedPartialSpecialization(
+ P->Partial, Best->Partial,
+ PointOfInstantiation) != Best->Partial) {
+ AmbiguousPartialSpec = true;
+ break;
}
}
-
- // Instantiate using the best variable template partial specialization.
- InstantiationPattern = Best->Partial;
- InstantiationArgs = Best->Args;
- } else {
- // -- If no match is found, the instantiation is generated
- // from the primary template.
- // InstantiationPattern = Template->getTemplatedDecl();
}
+
+ // Instantiate using the best variable template partial specialization.
+ InstantiationPattern = Best->Partial;
+ InstantiationArgs = Best->Args;
+ } else {
+ // -- If no match is found, the instantiation is generated
+ // from the primary template.
+ // InstantiationPattern = Template->getTemplatedDecl();
}
// 2. Create the canonical declaration.
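
The partial ordering performed above is what resolves uses like the following sketch (not from the patch):

    template<typename T> constexpr int rank = 0;
    template<typename T> constexpr int rank<T*> = rank<T> + 1;
    static_assert(rank<int**> == 2, "the T* partial specialization is more specialized");
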
@@ -4471,7 +4586,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
// FIXME: LateAttrs et al.?
VarTemplateSpecializationDecl *Decl = BuildVarTemplateInstantiation(
Template, InstantiationPattern, *InstantiationArgs, TemplateArgs,
- Converted, TemplateNameLoc, InsertPos /*, LateAttrs, StartingScope*/);
+ Converted, TemplateNameLoc /*, LateAttrs, StartingScope*/);
if (!Decl)
return true;
@@ -4510,6 +4625,9 @@ Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
if (Decl.isInvalid())
return ExprError();
+ if (!Decl.get())
+ return ExprResult();
+
VarDecl *Var = cast<VarDecl>(Decl.get());
if (!Var->getTemplateSpecializationKind())
Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation,
@@ -4547,22 +4665,16 @@ Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
return ExprError();
ConstraintSatisfaction Satisfaction;
- bool AreArgsDependent = false;
- for (TemplateArgument &Arg : Converted) {
- if (Arg.isDependent()) {
- AreArgsDependent = true;
- break;
- }
- }
+ bool AreArgsDependent =
+ TemplateSpecializationType::anyDependentTemplateArguments(*TemplateArgs,
+ Converted);
if (!AreArgsDependent &&
- CheckConstraintSatisfaction(NamedConcept,
- {NamedConcept->getConstraintExpr()},
- Converted,
- SourceRange(SS.isSet() ? SS.getBeginLoc() :
- ConceptNameInfo.getLoc(),
- TemplateArgs->getRAngleLoc()),
- Satisfaction))
- return ExprError();
+ CheckConstraintSatisfaction(
+ NamedConcept, {NamedConcept->getConstraintExpr()}, Converted,
+ SourceRange(SS.isSet() ? SS.getBeginLoc() : ConceptNameInfo.getLoc(),
+ TemplateArgs->getRAngleLoc()),
+ Satisfaction))
+ return ExprError();
return ConceptSpecializationExpr::Create(Context,
SS.isSet() ? SS.getWithLocInContext(Context) : NestedNameSpecifierLoc{},
@@ -4597,18 +4709,14 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
}
}
- auto AnyDependentArguments = [&]() -> bool {
- bool InstantiationDependent;
- return TemplateArgs &&
- TemplateSpecializationType::anyDependentTemplateArguments(
- *TemplateArgs, InstantiationDependent);
- };
-
// In C++1y, check variable template ids.
- if (R.getAsSingle<VarTemplateDecl>() && !AnyDependentArguments()) {
- return CheckVarTemplateId(SS, R.getLookupNameInfo(),
- R.getAsSingle<VarTemplateDecl>(),
- TemplateKWLoc, TemplateArgs);
+ if (R.getAsSingle<VarTemplateDecl>()) {
+ ExprResult Res = CheckVarTemplateId(SS, R.getLookupNameInfo(),
+ R.getAsSingle<VarTemplateDecl>(),
+ TemplateKWLoc, TemplateArgs);
+ if (Res.isInvalid() || Res.isUsable())
+ return Res;
+ // Result is dependent. Carry on to build an UnresolvedLookupExpr.
}
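
An illustrative case for the new fall-through: a variable template id with dependent arguments cannot be resolved yet, so it is left as a dependent expression until instantiation.

    template<typename T> constexpr T zero = T(0);
    template<typename T> constexpr T alsoZero = zero<T>;   // dependent until T is known
    static_assert(alsoZero<int> == 0, "resolved at instantiation time");
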
if (R.getAsSingle<ConceptDecl>()) {
@@ -5158,15 +5266,17 @@ Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
if (TName.isNull())
return TemplateArgumentLoc();
- return TemplateArgumentLoc(TemplateArgument(TName),
- TempTempParm->getDefaultArgument().getTemplateQualifierLoc(),
- TempTempParm->getDefaultArgument().getTemplateNameLoc());
+ return TemplateArgumentLoc(
+ Context, TemplateArgument(TName),
+ TempTempParm->getDefaultArgument().getTemplateQualifierLoc(),
+ TempTempParm->getDefaultArgument().getTemplateNameLoc());
}
/// Convert a template-argument that we parsed as a type into a template, if
/// possible. C++ permits injected-class-names to perform dual service as
/// template template arguments and as template type arguments.
-static TemplateArgumentLoc convertTypeTemplateArgumentToTemplate(TypeLoc TLoc) {
+static TemplateArgumentLoc
+convertTypeTemplateArgumentToTemplate(ASTContext &Context, TypeLoc TLoc) {
// Extract and step over any surrounding nested-name-specifier.
NestedNameSpecifierLoc QualLoc;
if (auto ETLoc = TLoc.getAs<ElaboratedTypeLoc>()) {
@@ -5176,11 +5286,10 @@ static TemplateArgumentLoc convertTypeTemplateArgumentToTemplate(TypeLoc TLoc) {
QualLoc = ETLoc.getQualifierLoc();
TLoc = ETLoc.getNamedTypeLoc();
}
-
// If this type was written as an injected-class-name, it can be used as a
// template template argument.
if (auto InjLoc = TLoc.getAs<InjectedClassNameTypeLoc>())
- return TemplateArgumentLoc(InjLoc.getTypePtr()->getTemplateName(),
+ return TemplateArgumentLoc(Context, InjLoc.getTypePtr()->getTemplateName(),
QualLoc, InjLoc.getNameLoc());
// If this type was written as an injected-class-name, it may have been
@@ -5190,7 +5299,8 @@ static TemplateArgumentLoc convertTypeTemplateArgumentToTemplate(TypeLoc TLoc) {
if (auto RecLoc = TLoc.getAs<RecordTypeLoc>())
if (auto *CTSD =
dyn_cast<ClassTemplateSpecializationDecl>(RecLoc.getDecl()))
- return TemplateArgumentLoc(TemplateName(CTSD->getSpecializedTemplate()),
+ return TemplateArgumentLoc(Context,
+ TemplateName(CTSD->getSpecializedTemplate()),
QualLoc, RecLoc.getNameLoc());
return TemplateArgumentLoc();
@@ -5429,7 +5539,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
// itself.
if (Arg.getArgument().getKind() == TemplateArgument::Type) {
TemplateArgumentLoc ConvertedArg = convertTypeTemplateArgumentToTemplate(
- Arg.getTypeSourceInfo()->getTypeLoc());
+ Context, Arg.getTypeSourceInfo()->getTypeLoc());
if (!ConvertedArg.getArgument().isNull())
Arg = ConvertedArg;
}
@@ -5468,39 +5578,6 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
return false;
}
-/// Check whether the template parameter is a pack expansion, and if so,
-/// determine the number of parameters produced by that expansion. For instance:
-///
-/// \code
-/// template<typename ...Ts> struct A {
-/// template<Ts ...NTs, template<Ts> class ...TTs, typename ...Us> struct B;
-/// };
-/// \endcode
-///
-/// In \c A<int,int>::B, \c NTs and \c TTs have expanded pack size 2, and \c Us
-/// is not a pack expansion, so returns an empty Optional.
-static Optional<unsigned> getExpandedPackSize(NamedDecl *Param) {
- if (TemplateTypeParmDecl *TTP
- = dyn_cast<TemplateTypeParmDecl>(Param)) {
- if (TTP->isExpandedParameterPack())
- return TTP->getNumExpansionParameters();
- }
-
- if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- if (NTTP->isExpandedParameterPack())
- return NTTP->getNumExpansionTypes();
- }
-
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(Param)) {
- if (TTP->isExpandedParameterPack())
- return TTP->getNumExpansionTemplateParameters();
- }
-
- return None;
-}
-
/// Diagnose a missing template argument.
template<typename TemplateParmDecl>
static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
@@ -5748,8 +5825,9 @@ bool Sema::CheckTemplateArgumentList(
if (Name.isNull())
return true;
- Arg = TemplateArgumentLoc(TemplateArgument(Name), QualifierLoc,
- TempParm->getDefaultArgument().getTemplateNameLoc());
+ Arg = TemplateArgumentLoc(
+ Context, TemplateArgument(Name), QualifierLoc,
+ TempParm->getDefaultArgument().getTemplateNameLoc());
}
// Introduce an instantiation record that describes where we are using
@@ -6543,8 +6621,8 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
// Create the template argument.
- Converted =
- TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()), ParamType);
+ Converted = TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()),
+ S.Context.getCanonicalType(ParamType));
S.MarkAnyDeclReferenced(Arg->getBeginLoc(), Entity, false);
return false;
}
@@ -6665,7 +6743,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
Converted = TemplateArgument(Arg);
} else {
ValueDecl *D = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
- Converted = TemplateArgument(D, ParamType);
+ Converted = TemplateArgument(D, S.Context.getCanonicalType(ParamType));
}
return Invalid;
}
@@ -6691,14 +6769,15 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
SourceLocation StartLoc = Arg->getBeginLoc();
// If the parameter type somehow involves auto, deduce the type now.
- if (getLangOpts().CPlusPlus17 && ParamType->isUndeducedType()) {
+ DeducedType *DeducedT = ParamType->getContainedDeducedType();
+ if (getLangOpts().CPlusPlus17 && DeducedT && !DeducedT->isDeduced()) {
// During template argument deduction, we allow 'decltype(auto)' to
// match an arbitrary dependent argument.
// FIXME: The language rules don't say what happens in this case.
// FIXME: We get an opaque dependent type out of decltype(auto) if the
// expression is merely instantiation-dependent; is this enough?
if (CTAK == CTAK_Deduced && Arg->isTypeDependent()) {
- auto *AT = dyn_cast<AutoType>(ParamType);
+ auto *AT = dyn_cast<AutoType>(DeducedT);
if (AT && AT->isDecltypeAuto()) {
Converted = TemplateArgument(Arg);
return Arg;
@@ -6712,14 +6791,26 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Expr *DeductionArg = Arg;
if (auto *PE = dyn_cast<PackExpansionExpr>(DeductionArg))
DeductionArg = PE->getPattern();
- if (DeduceAutoType(
- Context.getTrivialTypeSourceInfo(ParamType, Param->getLocation()),
- DeductionArg, ParamType, Depth,
- // We do not check constraints right now because the
- // immediately-declared constraint of the auto type is also an
- // associated constraint, and will be checked along with the other
- // associated constraints after checking the template argument list.
- /*IgnoreConstraints=*/true) == DAR_Failed) {
+ TypeSourceInfo *TSI =
+ Context.getTrivialTypeSourceInfo(ParamType, Param->getLocation());
+ if (isa<DeducedTemplateSpecializationType>(DeducedT)) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemplateParameter(ParamType, Param);
+ InitializationKind Kind = InitializationKind::CreateForInit(
+ DeductionArg->getBeginLoc(), /*DirectInit*/false, DeductionArg);
+ Expr *Inits[1] = {DeductionArg};
+ ParamType =
+ DeduceTemplateSpecializationFromInitializer(TSI, Entity, Kind, Inits);
+ if (ParamType.isNull())
+ return ExprError();
+ } else if (DeduceAutoType(
+ TSI, DeductionArg, ParamType, Depth,
+ // We do not check constraints right now because the
+ // immediately-declared constraint of the auto type is also
+ // an associated constraint, and will be checked along with
+ // the other associated constraints after checking the
+ // template argument list.
+ /*IgnoreConstraints=*/true) == DAR_Failed) {
Diag(Arg->getExprLoc(),
diag::err_non_type_template_parm_type_deduction_failure)
<< Param->getDeclName() << Param->getType() << Arg->getType()
@@ -6742,18 +6833,21 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
assert(!ParamType.hasQualifiers() &&
"non-type template parameter type cannot be qualified");
+ // FIXME: When Param is a reference, should we check that Arg is an lvalue?
if (CTAK == CTAK_Deduced &&
- !Context.hasSameType(ParamType.getNonLValueExprType(Context),
- Arg->getType())) {
+ (ParamType->isReferenceType()
+ ? !Context.hasSameType(ParamType.getNonReferenceType(),
+ Arg->getType())
+ : !Context.hasSameUnqualifiedType(ParamType, Arg->getType()))) {
// FIXME: If either type is dependent, we skip the check. This isn't
// correct, since during deduction we're supposed to have replaced each
// template parameter with some unique (non-dependent) placeholder.
// FIXME: If the argument type contains 'auto', we carry on and fail the
// type check in order to force specific types to be more specialized than
// 'auto'. It's not clear how partial ordering with 'auto' is supposed to
- // work.
+ // work. Similarly for CTAD, when comparing 'A<x>' against 'A'.
if ((ParamType->isDependentType() || Arg->isTypeDependent()) &&
- !Arg->getType()->getContainedAutoType()) {
+ !Arg->getType()->getContainedDeducedType()) {
Converted = TemplateArgument(Arg);
return Arg;
}
@@ -6800,12 +6894,36 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
*this, Sema::ExpressionEvaluationContext::ConstantEvaluated);
if (getLangOpts().CPlusPlus17) {
+ QualType CanonParamType = Context.getCanonicalType(ParamType);
+
+ // Avoid making a copy when initializing a template parameter of class type
+ // from a template parameter object of the same type. This is going beyond
+ // the standard, but is required for soundness: in
+ // template<A a> struct X { X *p; X<a> *q; };
+ // ... we need p and q to have the same type.
+ //
+ // Similarly, don't inject a call to a copy constructor when initializing
+ // from a template parameter of the same type.
+ Expr *InnerArg = Arg->IgnoreParenImpCasts();
+ if (ParamType->isRecordType() && isa<DeclRefExpr>(InnerArg) &&
+ Context.hasSameUnqualifiedType(ParamType, InnerArg->getType())) {
+ NamedDecl *ND = cast<DeclRefExpr>(InnerArg)->getDecl();
+ if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
+ Converted = TemplateArgument(TPO, CanonParamType);
+ return Arg;
+ }
+ if (isa<NonTypeTemplateParmDecl>(ND)) {
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+ }
+
// C++17 [temp.arg.nontype]p1:
// A template-argument for a non-type template parameter shall be
// a converted constant expression of the type of the template-parameter.
APValue Value;
ExprResult ArgResult = CheckConvertedConstantExpression(
- Arg, ParamType, Value, CCEK_TemplateArg);
+ Arg, ParamType, Value, CCEK_TemplateArg, Param);
if (ArgResult.isInvalid())
return ExprError();
@@ -6816,8 +6934,6 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return ArgResult;
}
- QualType CanonParamType = Context.getCanonicalType(ParamType);
-
// Convert the APValue to a TemplateArgument.
switch (Value.getKind()) {
case APValue::None:
@@ -6865,6 +6981,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return ExprError();
}
// -- a subobject
+ // FIXME: Until C++20
if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 &&
VD && VD->getType()->isArrayType() &&
Value.getLValuePath()[0].getAsArrayIndex() == 0 &&
@@ -6886,6 +7003,13 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
: TemplateArgument(CanonParamType, /*isNullPtr*/true);
break;
}
+ case APValue::Struct:
+ case APValue::Union:
+ // Get or create the corresponding template parameter object.
+ Converted = TemplateArgument(
+ Context.getTemplateParamObjectDecl(CanonParamType, Value),
+ CanonParamType);
+ break;
case APValue::AddrLabelDiff:
return Diag(StartLoc, diag::err_non_type_template_arg_addr_label_diff);
case APValue::FixedPoint:
@@ -6894,9 +7018,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
case APValue::ComplexFloat:
case APValue::Vector:
case APValue::Array:
- case APValue::Struct:
- case APValue::Union:
- llvm_unreachable("invalid kind for template argument");
+ return Diag(StartLoc, diag::err_non_type_template_arg_unsupported)
+ << ParamType;
}
return ArgResult.get();
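
A hedged C++20 sketch of the template parameter object semantics implemented above: template-argument-equivalent class-type arguments must denote one and the same object, which is why the code reuses an existing TemplateParamObjectDecl rather than making a copy.

    struct A { int n; };
    template<A a> constexpr const A *addr = &a;   // 'a' denotes a template parameter object
    static_assert(addr<A{1}> == addr<A{1}>, "equal arguments name the same object");
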
@@ -6982,14 +7105,13 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
public:
TmplArgICEDiagnoser(QualType T) : T(T) { }
- void diagnoseNotICE(Sema &S, SourceLocation Loc,
- SourceRange SR) override {
- S.Diag(Loc, diag::err_template_arg_not_ice) << T << SR;
+ SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
+ SourceLocation Loc) override {
+ return S.Diag(Loc, diag::err_template_arg_not_ice) << T;
}
} Diagnoser(ArgType);
- Arg = VerifyIntegerConstantExpression(Arg, &Value, Diagnoser,
- false).get();
+ Arg = VerifyIntegerConstantExpression(Arg, &Value, Diagnoser).get();
if (!Arg)
return ExprError();
}
@@ -7389,6 +7511,11 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
RefExpr = CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
if (RefExpr.isInvalid())
return ExprError();
+ } else if (ParamType->isRecordType()) {
+ assert(isa<TemplateParamObjectDecl>(VD) &&
+ "arg for class template param not a template parameter object");
+ // No conversions apply in this case.
+ return RefExpr;
} else {
assert(ParamType->isReferenceType() &&
"unexpected type for decl template argument");
@@ -7477,7 +7604,7 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
// FIXME: This is a hack. We need a better way to handle substituted
// non-type template parameters.
E = CStyleCastExpr::Create(Context, OrigT, VK_RValue, CK_IntegralCast, E,
- nullptr,
+ nullptr, CurFPFeatureOverrides(),
Context.getTrivialTypeSourceInfo(OrigT, Loc),
Loc, Loc);
}
@@ -7771,22 +7898,28 @@ Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
(S->getFlags() & Scope::TemplateParamScope) != 0)
S = S->getParent();
- // C++ [temp]p4:
- // A template [...] shall not have C linkage.
+ // C++ [temp.pre]p6: [P2096]
+ // A template, explicit specialization, or partial specialization shall not
+ // have C linkage.
DeclContext *Ctx = S->getEntity();
- assert(Ctx && "Unknown context");
- if (Ctx->isExternCContext()) {
+ if (Ctx && Ctx->isExternCContext()) {
Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
<< TemplateParams->getSourceRange();
if (const LinkageSpecDecl *LSD = Ctx->getExternCContext())
Diag(LSD->getExternLoc(), diag::note_extern_c_begins_here);
return true;
}
- Ctx = Ctx->getRedeclContext();
+ Ctx = Ctx ? Ctx->getRedeclContext() : nullptr;
// C++ [temp]p2:
// A template-declaration can appear only as a namespace scope or
// class scope declaration.
+ // C++ [temp.expl.spec]p3:
+ // An explicit specialization may be declared in any scope in which the
+ // corresponding primary template may be defined.
+ // C++ [temp.class.spec]p6: [P2096]
+ // A partial specialization may be declared in any scope in which the
+ // corresponding primary template may be defined.
if (Ctx) {
if (Ctx->isFileContext())
return false;
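
Illustrative source for the widened linkage check (the ill-formed lines are commented out so the sketch is self-contained):

    template<typename T> struct S {};
    extern "C" {
      // template<typename T> void f(T);         // always ill-formed: template with C linkage
      // template<> struct S<int> {};            // now also rejected: explicit specialization
      // template<typename T> struct S<T*> {};   // now also rejected: partial specialization
    }
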
@@ -8106,6 +8239,10 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (Invalid)
return true;
+ // Check that we can declare a template specialization here.
+ if (TemplateParams && CheckTemplateDeclScope(S, TemplateParams))
+ return true;
+
if (TemplateParams && TemplateParams->size() > 0) {
isPartialSpecialization = true;
@@ -8198,10 +8335,9 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// FIXME: Move this to CheckTemplatePartialSpecializationArgs so we
// also do it during instantiation.
- bool InstantiationDependent;
if (!Name.isDependent() &&
- !TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs.arguments(), InstantiationDependent)) {
+ !TemplateSpecializationType::anyDependentTemplateArguments(TemplateArgs,
+ Converted)) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< ClassTemplate->getDeclName();
isPartialSpecialization = false;
@@ -8462,6 +8598,9 @@ Decl *Sema::ActOnConceptDefinition(Scope *S,
return nullptr;
}
+ if (DiagnoseUnexpandedParameterPack(ConstraintExpr))
+ return nullptr;
+
ConceptDecl *NewDecl = ConceptDecl::Create(Context, DC, NameLoc, Name,
TemplateParameterLists.front(),
ConstraintExpr);
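
The new check rejects a constraint-expression that mentions a template parameter pack without expanding it, as in this sketch:

    template<typename... Ts> concept AllSmall = ((sizeof(Ts) <= 4) && ...);   // OK: pack expanded
    // template<typename... Ts> concept Bad = sizeof(Ts) <= 4;   // error: unexpanded pack 'Ts'
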
@@ -9580,11 +9719,11 @@ DeclResult Sema::ActOnExplicitInstantiation(
Def->setTemplateSpecializationKind(TSK);
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
- (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
- Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())) {
- // In the MS ABI, an explicit instantiation definition can add a dll
- // attribute to a template with a previous instantiation declaration.
- // MinGW doesn't allow this.
+ (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
+ !Context.getTargetInfo().getTriple().isPS4CPU())) {
+ // An explicit instantiation definition can add a dll attribute to a
+ // template with a previous instantiation declaration. MinGW doesn't
+ // allow this.
auto *A = cast<InheritableAttr>(
getDLLAttr(Specialization)->clone(getASTContext()));
A->setInherited(true);
@@ -9598,19 +9737,19 @@ DeclResult Sema::ActOnExplicitInstantiation(
bool NewlyDLLExported =
!PreviouslyDLLExported && Specialization->hasAttr<DLLExportAttr>();
if (Old_TSK == TSK_ImplicitInstantiation && NewlyDLLExported &&
- (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
- Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())) {
- // In the MS ABI, an explicit instantiation definition can add a dll
- // attribute to a template with a previous implicit instantiation.
- // MinGW doesn't allow this. We limit clang to only adding dllexport, to
- // avoid potentially strange codegen behavior. For example, if we extend
- // this conditional to dllimport, and we have a source file calling a
- // method on an implicitly instantiated template class instance and then
- // declaring a dllimport explicit instantiation definition for the same
- // template class, the codegen for the method call will not respect the
- // dllimport, while it will with cl. The Def will already have the DLL
- // attribute, since the Def and Specialization will be the same in the
- // case of Old_TSK == TSK_ImplicitInstantiation, and we already added the
+ (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
+ !Context.getTargetInfo().getTriple().isPS4CPU())) {
+ // An explicit instantiation definition can add a dll attribute to a
+ // template with a previous implicit instantiation. MinGW doesn't allow
+ // this. We limit clang to only adding dllexport, to avoid potentially
+ // strange codegen behavior. For example, if we extend this conditional
+ // to dllimport, and we have a source file calling a method on an
+ // implicitly instantiated template class instance and then declaring a
+ // dllimport explicit instantiation definition for the same template
+ // class, the codegen for the method call will not respect the dllimport,
+ // while it will with cl. The Def will already have the DLL attribute,
+ // since the Def and Specialization will be the same in the case of
+ // Old_TSK == TSK_ImplicitInstantiation, and we already added the
// attribute to the Specialization; we just need to make it take effect.
assert(Def == Specialization &&
"Def and Specialization should match for implicit instantiation");
@@ -9625,6 +9764,11 @@ DeclResult Sema::ActOnExplicitInstantiation(
dllExportImportClassTemplateSpecialization(*this, Def);
}
+ if (Def->hasAttr<MSInheritanceAttr>()) {
+ Specialization->addAttr(Def->getAttr<MSInheritanceAttr>());
+ Consumer.AssignInheritanceModel(Specialization);
+ }
+
// Set the template specialization kind. Make sure it is set before
// instantiating the members which will trigger ASTConsumer callbacks.
Specialization->setTemplateSpecializationKind(TSK);
@@ -9904,6 +10048,14 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (Res.isInvalid())
return true;
+ if (!Res.isUsable()) {
+ // We somehow specified dependent template arguments in an explicit
+ // instantiation. This should probably only happen during error
+ // recovery.
+ Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_dependent);
+ return true;
+ }
+
// Ignore access control bits, we don't need them for redeclaration
// checking.
Prev = cast<VarDecl>(Res.get());
@@ -10583,7 +10735,7 @@ namespace {
/// For the purposes of type reconstruction, a type has already been
/// transformed if it is NULL or if it is not dependent.
bool AlreadyTransformed(QualType T) {
- return T.isNull() || !T->isDependentType();
+ return T.isNull() || !T->isInstantiationDependentType();
}
/// Returns the location of the entity whose type is being
@@ -10636,7 +10788,7 @@ namespace {
TypeSourceInfo *Sema::RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name) {
- if (!T || !T->getType()->isDependentType())
+ if (!T || !T->getType()->isInstantiationDependentType())
return T;
CurrentInstantiationRebuilder Rebuilder(*this, Loc, Name);
@@ -10926,14 +11078,3 @@ void Sema::checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec) {
ExplicitSpecializationVisibilityChecker(*this, Loc).check(Spec);
}
-
-/// Check whether a template partial specialization that we've discovered
-/// is hidden, and produce suitable diagnostics if so.
-void Sema::checkPartialSpecializationVisibility(SourceLocation Loc,
- NamedDecl *Spec) {
- llvm::SmallVector<Module *, 8> Modules;
- if (!hasVisibleDeclaration(Spec, &Modules))
- diagnoseMissingImport(Loc, Spec, Spec->getLocation(), Modules,
- MissingImportKind::PartialSpecialization,
- /*Recover*/true);
-}
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 5392be57a3aa..ee4316e7a632 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -171,30 +171,41 @@ static void MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
/// If the given expression is of a form that permits the deduction
/// of a non-type template parameter, return the declaration of that
/// non-type template parameter.
-static NonTypeTemplateParmDecl *
-getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) {
+static const NonTypeTemplateParmDecl *
+getDeducedParameterFromExpr(const Expr *E, unsigned Depth) {
// If we are within an alias template, the expression may have undergone
// any number of parameter substitutions already.
while (true) {
- if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
+ if (const auto *IC = dyn_cast<ImplicitCastExpr>(E))
E = IC->getSubExpr();
- else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(E))
+ else if (const auto *CE = dyn_cast<ConstantExpr>(E))
E = CE->getSubExpr();
- else if (SubstNonTypeTemplateParmExpr *Subst =
- dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ else if (const auto *Subst = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
E = Subst->getReplacement();
- else
+ else if (const auto *CCE = dyn_cast<CXXConstructExpr>(E)) {
+ // Look through implicit copy construction from an lvalue of the same type.
+ if (CCE->getParenOrBraceRange().isValid())
+ break;
+ // Note, there could be default arguments.
+ assert(CCE->getNumArgs() >= 1 && "implicit construct expr should have at least 1 arg");
+ E = CCE->getArg(0);
+ } else
break;
}
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
- if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl()))
- if (NTTP->getDepth() == Info.getDeducedDepth())
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl()))
+ if (NTTP->getDepth() == Depth)
return NTTP;
return nullptr;
}
+static const NonTypeTemplateParmDecl *
+getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) {
+ return getDeducedParameterFromExpr(E, Info.getDeducedDepth());
+}
+
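
A plausible C++20 case for the new CXXConstructExpr handling (a sketch assuming class-type non-type template parameters): the reference to 'a' inside the parameter's type is wrapped in an implicit copy construction, which deduction must look through to find the NTTP.

    struct A { int n; };
    template<A a> struct X {};
    template<A a> void f(X<a>) {}
    void g() { f(X<A{1}>{}); }   // deduces a = A{1} through the implicit copy
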
/// Determine whether two declaration pointers refer to the same
/// declaration.
static bool isSameDeclaration(Decl *X, Decl *Y) {
@@ -374,7 +385,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
/// deduction is funneled through here.
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
- NonTypeTemplateParmDecl *NTTP, const DeducedTemplateArgument &NewDeduced,
+ const NonTypeTemplateParmDecl *NTTP, const DeducedTemplateArgument &NewDeduced,
QualType ValueType, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
assert(NTTP->getDepth() == Info.getDeducedDepth() &&
@@ -383,7 +394,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
DeducedTemplateArgument Result = checkDeducedTemplateArguments(
S.Context, Deduced[NTTP->getIndex()], NewDeduced);
if (Result.isNull()) {
- Info.Param = NTTP;
+ Info.Param = const_cast<NonTypeTemplateParmDecl*>(NTTP);
Info.FirstArg = Deduced[NTTP->getIndex()];
Info.SecondArg = NewDeduced;
return Sema::TDK_Inconsistent;
@@ -410,10 +421,16 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
// type from an argument (of non-reference type) should be performed.
// For now, we just remove reference types from both sides and let
// the final check for matching types sort out the mess.
+ ValueType = ValueType.getNonReferenceType();
+ if (ParamType->isReferenceType())
+ ParamType = ParamType.getNonReferenceType();
+ else
+ // Top-level cv-qualifiers are irrelevant for a non-reference type.
+ ValueType = ValueType.getUnqualifiedType();
+
return DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, ParamType.getNonReferenceType(),
- ValueType.getNonReferenceType(), Info, Deduced, TDF_SkipNonDependent,
- /*PartialOrdering=*/false,
+ S, TemplateParams, ParamType, ValueType, Info, Deduced,
+ TDF_SkipNonDependent, /*PartialOrdering=*/false,
/*ArrayBound=*/NewDeduced.wasDeducedFromArrayBound());
}
@@ -421,7 +438,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
/// from the given integral constant.
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
- NonTypeTemplateParmDecl *NTTP, const llvm::APSInt &Value,
+ const NonTypeTemplateParmDecl *NTTP, const llvm::APSInt &Value,
QualType ValueType, bool DeducedFromArrayBound, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
return DeduceNonTypeTemplateArgument(
@@ -435,7 +452,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
/// from the given null pointer template argument type.
static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
- NonTypeTemplateParmDecl *NTTP, QualType NullPtrType,
+ const NonTypeTemplateParmDecl *NTTP, QualType NullPtrType,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
Expr *Value =
@@ -454,7 +471,7 @@ static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
/// \returns true if deduction succeeded, false otherwise.
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
- NonTypeTemplateParmDecl *NTTP, Expr *Value, TemplateDeductionInfo &Info,
+ const NonTypeTemplateParmDecl *NTTP, Expr *Value, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
DeducedTemplateArgument(Value),
@@ -467,7 +484,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
/// \returns true if deduction succeeded, false otherwise.
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
- NonTypeTemplateParmDecl *NTTP, ValueDecl *D, QualType T,
+ const NonTypeTemplateParmDecl *NTTP, ValueDecl *D, QualType T,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
D = D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
@@ -641,23 +658,6 @@ static TemplateParameter makeTemplateParameter(Decl *D) {
return TemplateParameter(cast<TemplateTemplateParmDecl>(D));
}
-/// If \p Param is an expanded parameter pack, get the number of expansions.
-static Optional<unsigned> getExpandedPackSize(NamedDecl *Param) {
- if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
- if (TTP->isExpandedParameterPack())
- return TTP->getNumExpansionParameters();
-
- if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param))
- if (NTTP->isExpandedParameterPack())
- return NTTP->getNumExpansionTypes();
-
- if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Param))
- if (TTP->isExpandedParameterPack())
- return TTP->getNumExpansionTemplateParameters();
-
- return None;
-}
-
/// A pack that we're currently deducing.
struct clang::DeducedPack {
// The index of the pack.
@@ -1201,6 +1201,120 @@ static bool isForwardingReference(QualType Param, unsigned FirstInnerIndex) {
return false;
}
+/// Attempt to deduce the template arguments by checking the base types
+/// according to (C++20 [temp.deduct.call] p4b3.
+///
+/// \param S the semantic analysis object within which we are deducing.
+///
+/// \param RecordT the top level record object we are deducing against.
+///
+/// \param TemplateParams the template parameters that we are deducing.
+///
+/// \param SpecParam the template specialization parameter type.
+///
+/// \param Info information about the template argument deduction itself.
+///
+/// \param Deduced the deduced template arguments.
+///
+/// \returns the result of template argument deduction against the bases:
+/// TDK_Invalid means no base matched, TDK_Success means exactly one base
+/// matched, and TDK_MiscellaneousDeductionFailure means the match was
+/// ambiguous.
+static Sema::TemplateDeductionResult DeduceTemplateBases(
+ Sema &S, const RecordType *RecordT, TemplateParameterList *TemplateParams,
+ const TemplateSpecializationType *SpecParam, TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ // C++14 [temp.deduct.call] p4b3:
+ // If P is a class and P has the form simple-template-id, then the
+ // transformed A can be a derived class of the deduced A. Likewise if
+ // P is a pointer to a class of the form simple-template-id, the
+ // transformed A can be a pointer to a derived class pointed to by the
+ // deduced A. However, if there is a class C that is a (direct or
+ // indirect) base class of D and derived (directly or indirectly) from a
+ // class B and that would be a valid deduced A, the deduced A cannot be
+ // B or pointer to B, respectively.
+ //
+ // These alternatives are considered only if type deduction would
+ // otherwise fail. If they yield more than one possible deduced A, the
+ // type deduction fails.
+
+ // Use a worklist search through the bases (ToVisit is a stack, so the
+ // traversal is depth-first) to collect the set of successful matches.
+ // Visited contains the set of nodes we have already visited, and Matches
+ // contains the matches that have yet to be disqualified.
+ llvm::SmallPtrSet<const RecordType *, 8> Visited;
+ SmallVector<const RecordType *, 8> ToVisit;
+ // We iterate over this later, so we have to use MapVector to ensure
+ // determinism.
+ llvm::MapVector<const RecordType *, SmallVector<DeducedTemplateArgument, 8>>
+ Matches;
+
+ auto AddBases = [&Visited, &ToVisit](const RecordType *RT) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ for (const auto &Base : RD->bases()) {
+ assert(Base.getType()->isRecordType() &&
+ "Base class that isn't a record?");
+ const RecordType *BaseRT = Base.getType()->getAs<RecordType>();
+ if (Visited.insert(BaseRT).second)
+ ToVisit.push_back(BaseRT);
+ }
+ };
+
+ // Set up the loop by adding all the bases.
+ AddBases(RecordT);
+
+ // Search each inheritance path until we either find a successful match
+ // (whose own bases are then not visited) or run out of bases.
+ while (!ToVisit.empty()) {
+ const RecordType *NextT = ToVisit.pop_back_val();
+
+ SmallVector<DeducedTemplateArgument, 8> DeducedCopy(Deduced.begin(),
+ Deduced.end());
+ TemplateDeductionInfo BaseInfo(TemplateDeductionInfo::ForBase, Info);
+ Sema::TemplateDeductionResult BaseResult =
+ DeduceTemplateArguments(S, TemplateParams, SpecParam,
+ QualType(NextT, 0), BaseInfo, DeducedCopy);
+
+ // If this was a successful deduction, add it to the list of matches,
+ // otherwise we need to continue searching its bases.
+ if (BaseResult == Sema::TDK_Success)
+ Matches.insert({NextT, DeducedCopy});
+ else
+ AddBases(NextT);
+ }
+
+ // At this point, 'Matches' contains a list of seemingly valid bases. However,
+ // if there is more than one match, the base of one match might itself be
+ // disqualified for being a base of another valid match. Since cyclic
+ // inheritance is ill-formed, we can use that to simplify the
+ // disqualifications: if A and B are both matches, and B inherits from A
+ // (disqualifying A), then A cannot also inherit from B.
+ if (Matches.size() > 1) {
+ Visited.clear();
+ for (const auto &Match : Matches)
+ AddBases(Match.first);
+
+ // We can give up once we have a single item (or have run out of things to
+ // search) since cyclical inheritance isn't valid.
+ while (Matches.size() > 1 && !ToVisit.empty()) {
+ const RecordType *NextT = ToVisit.pop_back_val();
+ Matches.erase(NextT);
+
+ // Always add all bases, since the inheritance tree can contain
+ // disqualifications for multiple matches.
+ AddBases(NextT);
+ }
+ }
+
+ if (Matches.empty())
+ return Sema::TDK_Invalid;
+ if (Matches.size() > 1)
+ return Sema::TDK_MiscellaneousDeductionFailure;
+
+ std::swap(Matches.front().second, Deduced);
+ return Sema::TDK_Success;
+}
+
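
The rule being implemented, as an illustrative sketch: deduction may succeed against a base class, but only if the choice of base is unambiguous.

    template<typename T> struct Base {};
    struct Derived : Base<int> {};
    template<typename T> void f(Base<T>) {}
    void g() { f(Derived{}); }    // T deduced as int via the Base<int> base

    struct Multi : Base<int>, Base<long> {};
    // f(Multi{});   // error: more than one possible deduced A (ambiguous)
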
/// Deduce the template arguments by comparing the parameter type and
/// the argument type (C++ [temp.deduct.type]).
///
@@ -1484,14 +1598,18 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Sema::TDK_Success;
}
} else if (!Param->isDependentType()) {
- CanQualType ParamUnqualType = CanParam.getUnqualifiedType(),
- ArgUnqualType = CanArg.getUnqualifiedType();
- bool Success =
- (TDF & TDF_AllowCompatibleFunctionType)
- ? S.isSameOrCompatibleFunctionType(ParamUnqualType, ArgUnqualType)
- : ParamUnqualType == ArgUnqualType;
- if (Success)
+ if (!(TDF & TDF_SkipNonDependent)) {
+ CanQualType ParamUnqualType = CanParam.getUnqualifiedType(),
+ ArgUnqualType = CanArg.getUnqualifiedType();
+ bool Success =
+ (TDF & TDF_AllowCompatibleFunctionType)
+ ? S.isSameOrCompatibleFunctionType(ParamUnqualType, ArgUnqualType)
+ : ParamUnqualType == ArgUnqualType;
+ if (Success)
+ return Sema::TDK_Success;
+ } else {
return Sema::TDK_Success;
+ }
}
switch (Param->getTypeClass()) {
@@ -1643,7 +1761,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Result;
// Determine whether the array bound is something we can deduce.
- NonTypeTemplateParmDecl *NTTP
+ const NonTypeTemplateParmDecl *NTTP
= getDeducedParameterFromExpr(Info, DependentArrayParm->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -1712,7 +1830,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// deducing through the noexcept-specifier if it's part of the canonical
// type. libstdc++ relies on this.
Expr *NoexceptExpr = FunctionProtoParam->getNoexceptExpr();
- if (NonTypeTemplateParmDecl *NTTP =
+ if (const NonTypeTemplateParmDecl *NTTP =
NoexceptExpr ? getDeducedParameterFromExpr(Info, NoexceptExpr)
: nullptr) {
assert(NTTP->getDepth() == Info.getDeducedDepth() &&
@@ -1787,78 +1905,15 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (!S.isCompleteType(Info.getLocation(), Arg))
return Result;
- // C++14 [temp.deduct.call] p4b3:
- // If P is a class and P has the form simple-template-id, then the
- // transformed A can be a derived class of the deduced A. Likewise if
- // P is a pointer to a class of the form simple-template-id, the
- // transformed A can be a pointer to a derived class pointed to by the
- // deduced A.
- //
- // These alternatives are considered only if type deduction would
- // otherwise fail. If they yield more than one possible deduced A, the
- // type deduction fails.
-
// Reset the incorrectly deduced argument from above.
Deduced = DeducedOrig;
- // Use data recursion to crawl through the list of base classes.
- // Visited contains the set of nodes we have already visited, while
- // ToVisit is our stack of records that we still need to visit.
- llvm::SmallPtrSet<const RecordType *, 8> Visited;
- SmallVector<const RecordType *, 8> ToVisit;
- ToVisit.push_back(RecordT);
- bool Successful = false;
- SmallVector<DeducedTemplateArgument, 8> SuccessfulDeduced;
- while (!ToVisit.empty()) {
- // Retrieve the next class in the inheritance hierarchy.
- const RecordType *NextT = ToVisit.pop_back_val();
-
- // If we have already seen this type, skip it.
- if (!Visited.insert(NextT).second)
- continue;
-
- // If this is a base class, try to perform template argument
- // deduction from it.
- if (NextT != RecordT) {
- TemplateDeductionInfo BaseInfo(TemplateDeductionInfo::ForBase, Info);
- Sema::TemplateDeductionResult BaseResult =
- DeduceTemplateArguments(S, TemplateParams, SpecParam,
- QualType(NextT, 0), BaseInfo, Deduced);
-
- // If template argument deduction for this base was successful,
- // note that we had some success. Otherwise, ignore any deductions
- // from this base class.
- if (BaseResult == Sema::TDK_Success) {
- // If we've already seen some success, then deduction fails due to
- // an ambiguity (temp.deduct.call p5).
- if (Successful)
- return Sema::TDK_MiscellaneousDeductionFailure;
-
- Successful = true;
- std::swap(SuccessfulDeduced, Deduced);
-
- Info.Param = BaseInfo.Param;
- Info.FirstArg = BaseInfo.FirstArg;
- Info.SecondArg = BaseInfo.SecondArg;
- }
-
- Deduced = DeducedOrig;
- }
-
- // Visit base classes
- CXXRecordDecl *Next = cast<CXXRecordDecl>(NextT->getDecl());
- for (const auto &Base : Next->bases()) {
- assert(Base.getType()->isRecordType() &&
- "Base class that isn't a record?");
- ToVisit.push_back(Base.getType()->getAs<RecordType>());
- }
- }
-
- if (Successful) {
- std::swap(SuccessfulDeduced, Deduced);
- return Sema::TDK_Success;
- }
+ // Check bases according to C++14 [temp.deduct.call] p4b3:
+ Sema::TemplateDeductionResult BaseResult = DeduceTemplateBases(
+ S, RecordT, TemplateParams, SpecParam, Info, Deduced);
+ if (BaseResult != Sema::TDK_Invalid)
+ return BaseResult;
return Result;
}
@@ -1964,7 +2019,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Result;
// Perform deduction on the vector size, if we can.
- NonTypeTemplateParmDecl *NTTP =
+ const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -1988,7 +2043,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Result;
// Perform deduction on the vector size, if we can.
- NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
+ const NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
Info, VectorParam->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -2017,8 +2072,8 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Result;
// Perform deduction on the vector size, if we can.
- NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
+ const NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -2043,8 +2098,8 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Result;
// Perform deduction on the vector size, if we can.
- NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
+ const NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -2100,28 +2155,27 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
const auto *ArgConstMatrix = dyn_cast<ConstantMatrixType>(Arg);
const auto *ArgDepMatrix = dyn_cast<DependentSizedMatrixType>(Arg);
if (!ParamExpr->isValueDependent()) {
- llvm::APSInt ParamConst(
- S.Context.getTypeSize(S.Context.getSizeType()));
- if (!ParamExpr->isIntegerConstantExpr(ParamConst, S.Context))
+ Optional<llvm::APSInt> ParamConst =
+ ParamExpr->getIntegerConstantExpr(S.Context);
+ if (!ParamConst)
return Sema::TDK_NonDeducedMismatch;
if (ArgConstMatrix) {
- if ((ArgConstMatrix->*GetArgDimension)() == ParamConst)
+ if ((ArgConstMatrix->*GetArgDimension)() == *ParamConst)
return Sema::TDK_Success;
return Sema::TDK_NonDeducedMismatch;
}
Expr *ArgExpr = (ArgDepMatrix->*GetArgDimensionExpr)();
- llvm::APSInt ArgConst(
- S.Context.getTypeSize(S.Context.getSizeType()));
- if (!ArgExpr->isValueDependent() &&
- ArgExpr->isIntegerConstantExpr(ArgConst, S.Context) &&
- ArgConst == ParamConst)
- return Sema::TDK_Success;
+ if (!ArgExpr->isValueDependent())
+ if (Optional<llvm::APSInt> ArgConst =
+ ArgExpr->getIntegerConstantExpr(S.Context))
+ if (*ArgConst == *ParamConst)
+ return Sema::TDK_Success;
return Sema::TDK_NonDeducedMismatch;
}
- NonTypeTemplateParmDecl *NTTP =
+ const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, ParamExpr);
if (!NTTP)
return Sema::TDK_Success;
@@ -2168,7 +2222,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Result;
// Perform deduction on the address space, if we can.
- NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
+ const NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
Info, AddressSpaceParam->getAddrSpaceExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -2191,7 +2245,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Result;
// Perform deduction on the address space, if we can.
- NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
+ const NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
Info, AddressSpaceParam->getAddrSpaceExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -2210,7 +2264,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (IntParam->isUnsigned() != IntArg->isUnsigned())
return Sema::TDK_NonDeducedMismatch;
- NonTypeTemplateParmDecl *NTTP =
+ const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, IntParam->getNumBitsExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -2327,8 +2381,8 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_NonDeducedMismatch;
case TemplateArgument::Expression:
- if (NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(Info, Param.getAsExpr())) {
+ if (const NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, Param.getAsExpr())) {
if (Arg.getKind() == TemplateArgument::Integral)
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
Arg.getAsIntegral(),
@@ -2620,11 +2674,11 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
if (Arg.getKind() == TemplateArgument::Template)
- return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(Context),
- Loc);
+ return TemplateArgumentLoc(Context, Arg,
+ Builder.getWithLocInContext(Context), Loc);
- return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(Context),
- Loc, Loc);
+ return TemplateArgumentLoc(
+ Context, Arg, Builder.getWithLocInContext(Context), Loc, Loc);
}
case TemplateArgument::Expression:
@@ -3807,17 +3861,18 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(
if (ParamRefType) {
// If the argument has incomplete array type, try to complete its type.
- if (ArgType->isIncompleteArrayType()) {
- S.completeExprArrayBound(Arg);
- ArgType = Arg->getType();
- }
+ if (ArgType->isIncompleteArrayType())
+ ArgType = S.getCompletedType(Arg);
// C++1z [temp.deduct.call]p3:
// If P is a forwarding reference and the argument is an lvalue, the type
// "lvalue reference to A" is used in place of A for type deduction.
if (isForwardingReference(QualType(ParamRefType, 0), FirstInnerIndex) &&
- Arg->isLValue())
+ Arg->isLValue()) {
+ if (S.getLangOpts().OpenCL)
+ ArgType = S.Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
ArgType = S.Context.getLValueReferenceType(ArgType);
+ }
} else {
// C++ [temp.deduct.call]p2:
// If P is not a reference type:
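
For context, the forwarding-reference rule applied above (with the patch additionally placing the lvalue's type in the generic address space under OpenCL before forming the reference):

    template<typename T> void fwd(T&&) {}
    void g() {
      int n = 0;
      fwd(n);   // lvalue: T deduced as int&, parameter collapses to int&
      fwd(0);   // rvalue: T deduced as int, parameter is int&&
    }
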
@@ -3929,7 +3984,7 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
// from the length of the initializer list.
if (auto *DependentArrTy = dyn_cast_or_null<DependentSizedArrayType>(ArrTy)) {
// Determine whether the array bound is something we can deduce.
- if (NonTypeTemplateParmDecl *NTTP =
+ if (const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, DependentArrTy->getSizeExpr())) {
// We can perform template argument deduction for the given non-type
// template parameter.
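
The case being handled, as a runnable sketch:

    template <typename T, int N> void count(T (&&)[N]) {}

    void test() {
      count({1, 2, 3}); // dependent array bound: N deduced as 3 from the list length
    }
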
@@ -4894,6 +4949,13 @@ QualType Sema::ReplaceAutoType(QualType TypeWithAuto,
.TransformType(TypeWithAuto);
}
+TypeSourceInfo *Sema::ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
+ QualType TypeToReplaceAuto) {
+ return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto,
+ /*UseTypeSugar*/ false)
+ .TransformType(TypeWithAuto);
+}
+
void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) {
if (isa<InitListExpr>(Init))
Diag(VDecl->getLocation(),
@@ -4941,8 +5003,12 @@ bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
"failed to deduce lambda return type");
// Build the new return type from scratch.
+ CallingConv RetTyCC = FD->getReturnType()
+ ->getPointeeType()
+ ->castAs<FunctionType>()
+ ->getCallConv();
QualType RetType = getLambdaConversionFunctionResultType(
- CallOp->getType()->castAs<FunctionProtoType>());
+ CallOp->getType()->castAs<FunctionProtoType>(), RetTyCC);
if (FD->getReturnType()->getAs<PointerType>())
RetType = Context.getPointerType(RetType);
else {
@@ -5668,26 +5734,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
if (const PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(E))
E = Expansion->getPattern();
- // Skip through any implicit casts we added while type-checking, and any
- // substitutions performed by template alias expansion.
- while (true) {
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
- E = ICE->getSubExpr();
- else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(E))
- E = CE->getSubExpr();
- else if (const SubstNonTypeTemplateParmExpr *Subst =
- dyn_cast<SubstNonTypeTemplateParmExpr>(E))
- E = Subst->getReplacement();
- else
- break;
- }
-
- const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
- if (!DRE)
- return;
-
- const NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+ const NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(E, Depth);
if (!NTTP)
return;
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 11e03c517d01..8bd812b39de4 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -141,7 +141,12 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
TSK_ExplicitSpecialization)
break;
- if (const TemplateArgumentList *TemplateArgs
+ if (!RelativeToPrimary && Function->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization) {
+ // This is an implicit instantiation of an explicit specialization. We
+ // don't get any template arguments from this function but might get
+ // some from an enclosing template.
+ } else if (const TemplateArgumentList *TemplateArgs
= Function->getTemplateSpecializationArgs()) {
// Add the template arguments for this specialization.
Result.addOuterTemplateArguments(TemplateArgs);
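
One shape of code the new branch appears to cover (a hedged reading of the comment above): an explicit specialization nested in a class template, which records no template arguments of its own but still sits inside an enclosing template's arguments.

    template <typename T> struct Outer {
      template <typename U> void f(U);
    };
    // Explicit specialization for Outer<int>; the specialization itself records
    // no template arguments, only the enclosing context contributes some.
    template <> template <typename U> void Outer<int>::f(U) {}

    void use() { Outer<int>().f(0.0); }
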
@@ -237,7 +242,7 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
// error have occurred. Any diagnostics we might have raised will not be
// visible, and we do not need to construct a correct AST.
if (SemaRef.Diags.hasFatalErrorOccurred() &&
- SemaRef.Diags.hasUncompilableErrorOccurred()) {
+ SemaRef.hasUncompilableErrorOccurred()) {
Invalid = true;
return;
}
@@ -256,7 +261,7 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
AlreadyInstantiating = !Inst.Entity ? false :
!SemaRef.InstantiatingSpecializations
- .insert(std::make_pair(Inst.Entity->getCanonicalDecl(), Inst.Kind))
+ .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind})
.second;
atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, Inst);
}
@@ -475,7 +480,7 @@ void Sema::InstantiatingTemplate::Clear() {
auto &Active = SemaRef.CodeSynthesisContexts.back();
if (Active.Entity)
SemaRef.InstantiatingSpecializations.erase(
- std::make_pair(Active.Entity, Active.Kind));
+ {Active.Entity->getCanonicalDecl(), Active.Kind});
}
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef,
@@ -579,7 +584,7 @@ void Sema::PrintInstantiationStack() {
case CodeSynthesisContext::DefaultTemplateArgumentInstantiation: {
TemplateDecl *Template = cast<TemplateDecl>(Active->Template);
- SmallVector<char, 128> TemplateArgsStr;
+ SmallString<128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
Template->printName(OS);
printTemplateArgumentList(OS, Active->template_arguments(),
@@ -645,7 +650,7 @@ void Sema::PrintInstantiationStack() {
ParmVarDecl *Param = cast<ParmVarDecl>(Active->Entity);
FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
- SmallVector<char, 128> TemplateArgsStr;
+ SmallString<128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
FD->printName(OS);
printTemplateArgumentList(OS, Active->template_arguments(),
@@ -797,7 +802,7 @@ void Sema::PrintInstantiationStack() {
assert(isa<FunctionDecl>(Active->Entity));
DiagID = diag::note_checking_constraints_for_function_here;
}
- SmallVector<char, 128> TemplateArgsStr;
+ SmallString<128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
cast<NamedDecl>(Active->Entity)->printName(OS);
if (!isa<FunctionDecl>(Active->Entity))
@@ -856,6 +861,7 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::DefaultTemplateArgumentInstantiation:
case CodeSynthesisContext::PriorTemplateArgumentSubstitution:
case CodeSynthesisContext::DefaultTemplateArgumentChecking:
+ case CodeSynthesisContext::RewritingOperatorAsSpaceship:
// A default template argument instantiation and substitution into
// template parameters with arguments for prior parameters may or may
// not be a SFINAE context; look further up the stack.
@@ -874,7 +880,6 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::DeclaringSpecialMember:
case CodeSynthesisContext::DeclaringImplicitEqualityComparison:
case CodeSynthesisContext::DefiningSynthesizedFunction:
- case CodeSynthesisContext::RewritingOperatorAsSpaceship:
case CodeSynthesisContext::InitializingStructuredBinding:
case CodeSynthesisContext::MarkingClassDllexported:
// This happens in a context unrelated to template instantiation, so
@@ -1414,47 +1419,11 @@ TemplateName TemplateInstantiator::TransformTemplateName(
AllowInjectedClassName);
}
-static ExprResult TransformUniqueStableName(TemplateInstantiator &TI,
- PredefinedExpr *E) {
- if (E->getIdentKind() == PredefinedExpr::UniqueStableNameType) {
- TypeSourceInfo *Info =
- TI.getDerived().TransformType(E->getTypeSourceInfo());
-
- if (!Info)
- return ExprError();
-
- if (!TI.getDerived().AlwaysRebuild() && Info == E->getTypeSourceInfo())
- return E;
-
- return TI.getSema().BuildUniqueStableName(E->getLocation(), Info);
- }
-
- if (E->getIdentKind() == PredefinedExpr::UniqueStableNameExpr) {
- EnterExpressionEvaluationContext Unevaluated(
- TI.getSema(), Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult SubExpr = TI.getDerived().TransformExpr(E->getExpr());
-
- if (SubExpr.isInvalid())
- return ExprError();
-
- if (!TI.getDerived().AlwaysRebuild() && SubExpr.get() == E->getExpr())
- return E;
-
- return TI.getSema().BuildUniqueStableName(E->getLocation(), SubExpr.get());
- }
-
- llvm_unreachable("Only valid for UniqueStableNameType/Expr");
-}
-
ExprResult
TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
return E;
- if (E->getIdentKind() == PredefinedExpr::UniqueStableNameType ||
- E->getIdentKind() == PredefinedExpr::UniqueStableNameExpr)
- return TransformUniqueStableName(*this, E);
-
return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentKind());
}
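
Why the remaining dependent case matters, in user code:

    template <typename T> const char *name() {
      // __func__ is a PredefinedExpr; in a template its char-array type is
      // dependent, so it is rebuilt via BuildPredefinedExpr on instantiation.
      return __func__;
    }
    const char *s = name<int>();
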
@@ -1500,9 +1469,12 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
if (TargetType.isNull())
return ExprError();
+ QualType ExprType = TargetType.getNonLValueExprType(SemaRef.Context);
+ if (TargetType->isRecordType())
+ ExprType.addConst();
+
return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(
- TargetType.getNonLValueExprType(SemaRef.Context),
- TargetType->isReferenceType() ? VK_LValue : VK_RValue, NTTP,
+ ExprType, TargetType->isReferenceType() ? VK_LValue : VK_RValue, NTTP,
E->getLocation(), Arg);
}
@@ -1534,15 +1506,39 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
SourceLocation loc,
TemplateArgument arg) {
ExprResult result;
- QualType type;
- // The template argument itself might be an expression, in which
- // case we just return that expression.
+ // Determine the substituted parameter type. We can usually infer this from
+ // the template argument, but not always.
+ auto SubstParamType = [&] {
+ QualType T;
+ if (parm->isExpandedParameterPack())
+ T = parm->getExpansionType(SemaRef.ArgumentPackSubstitutionIndex);
+ else
+ T = parm->getType();
+ if (parm->isParameterPack() && isa<PackExpansionType>(T))
+ T = cast<PackExpansionType>(T)->getPattern();
+ return SemaRef.SubstType(T, TemplateArgs, loc, parm->getDeclName());
+ };
+
+ bool refParam = false;
+
+ // The template argument itself might be an expression, in which case we just
+ // return that expression. This happens when substituting into an alias
+ // template.
if (arg.getKind() == TemplateArgument::Expression) {
Expr *argExpr = arg.getAsExpr();
result = argExpr;
- type = argExpr->getType();
-
+ if (argExpr->isLValue()) {
+ if (argExpr->getType()->isRecordType()) {
+ // Check whether the parameter was actually a reference.
+ QualType paramType = SubstParamType();
+ if (paramType.isNull())
+ return ExprError();
+ refParam = paramType->isReferenceType();
+ } else {
+ refParam = true;
+ }
+ }
} else if (arg.getKind() == TemplateArgument::Declaration ||
arg.getKind() == TemplateArgument::NullPtr) {
ValueDecl *VD;
@@ -1560,36 +1556,25 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
VD = nullptr;
}
- // Derive the type we want the substituted decl to have. This had
- // better be non-dependent, or these checks will have serious problems.
- if (parm->isExpandedParameterPack()) {
- type = parm->getExpansionType(SemaRef.ArgumentPackSubstitutionIndex);
- } else if (parm->isParameterPack() &&
- isa<PackExpansionType>(parm->getType())) {
- type = SemaRef.SubstType(
- cast<PackExpansionType>(parm->getType())->getPattern(),
- TemplateArgs, loc, parm->getDeclName());
- } else {
- type = SemaRef.SubstType(VD ? arg.getParamTypeForDecl() : arg.getNullPtrType(),
- TemplateArgs, loc, parm->getDeclName());
- }
- assert(!type.isNull() && "type substitution failed for param type");
- assert(!type->isDependentType() && "param type still dependent");
- result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, type, loc);
-
- if (!result.isInvalid()) type = result.get()->getType();
+ QualType paramType = VD ? arg.getParamTypeForDecl() : arg.getNullPtrType();
+ assert(!paramType.isNull() && "type substitution failed for param type");
+ assert(!paramType->isDependentType() && "param type still dependent");
+ result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, paramType, loc);
+ refParam = paramType->isReferenceType();
} else {
result = SemaRef.BuildExpressionFromIntegralTemplateArgument(arg, loc);
-
- // Note that this type can be different from the type of 'result',
- // e.g. if it's an enum type.
- type = arg.getIntegralType();
+ assert(result.isInvalid() ||
+ SemaRef.Context.hasSameType(result.get()->getType(),
+ arg.getIntegralType()));
}
- if (result.isInvalid()) return ExprError();
+
+ if (result.isInvalid())
+ return ExprError();
Expr *resultExpr = result.get();
return new (SemaRef.Context) SubstNonTypeTemplateParmExpr(
- type, resultExpr->getValueKind(), loc, parm, resultExpr);
+ resultExpr->getType(), resultExpr->getValueKind(), loc, parm, refParam,
+ resultExpr);
}
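
The new refParam flag records whether the parameter was declared as a reference, which the replacement expression alone no longer conveys. Roughly the two cases:

    struct S { int v; };
    S g{42};

    template <const S &Ref> int byRef() { return Ref.v; } // reference: lvalue bound directly
    template <int Val> int byVal() { return Val; }        // by value: substituted as a prvalue

    int a = byRef<g>();
    int b = byVal<7>();
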
ExprResult
@@ -1610,10 +1595,12 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
ExprResult
TemplateInstantiator::TransformSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
- ExprResult SubstReplacement = TransformExpr(E->getReplacement());
+ ExprResult SubstReplacement = E->getReplacement();
+ if (!isa<ConstantExpr>(SubstReplacement.get()))
+ SubstReplacement = TransformExpr(E->getReplacement());
if (SubstReplacement.isInvalid())
return true;
- QualType SubstType = TransformType(E->getType());
+ QualType SubstType = TransformType(E->getParameterType(getSema().Context));
if (SubstType.isNull())
return true;
// The type may have been previously dependent and not now, which means we
@@ -2732,7 +2719,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
// BlockDecls can appear in a default-member-initializer. They must be the
// child of a BlockExpr, so we only know how to instantiate them from there.
- if (isa<BlockDecl>(Member))
+ // Similarly, lambda closure types are recreated when instantiating the
+ // corresponding LambdaExpr.
+ if (isa<BlockDecl>(Member) ||
+ (isa<CXXRecordDecl>(Member) && cast<CXXRecordDecl>(Member)->isLambda()))
continue;
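
For example, the closure type in a default member initializer is rebuilt with its LambdaExpr rather than instantiated as a member:

    template <typename T> struct W {
      int v = [] { return 1; }(); // closure type skipped above, recreated here
    };
    W<int> w;
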
if (Member->isInvalidDecl()) {
@@ -2806,7 +2796,8 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Attr *NewAttr =
instantiateTemplateAttribute(I->TmplAttr, Context, *this, TemplateArgs);
- I->NewDecl->addAttr(NewAttr);
+ if (NewAttr)
+ I->NewDecl->addAttr(NewAttr);
LocalInstantiationScope::deleteScopes(I->Scope,
Instantiator.getStartingScope());
}
@@ -2858,8 +2849,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
SavedContext.pop();
if (!Instantiation->isInvalidDecl()) {
- Consumer.HandleTagDeclDefinition(Instantiation);
-
// Always emit the vtable for an explicit instantiation definition
// of a polymorphic class template specialization. Otherwise, eagerly
// instantiate only constexpr virtual functions in preparation for their use
@@ -2871,6 +2860,8 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
/*ConstexprOnly*/ true);
}
+ Consumer.HandleTagDeclDefinition(Instantiation);
+
return Instantiation->isInvalidDecl();
}
@@ -2972,9 +2963,10 @@ bool Sema::InstantiateInClassInitializer(
RecordDecl *PatternRD = Pattern->getParent();
RecordDecl *OutermostClass = PatternRD->getOuterLexicalRecordContext();
Diag(PointOfInstantiation,
- diag::err_in_class_initializer_not_yet_parsed)
+ diag::err_default_member_initializer_not_yet_parsed)
<< OutermostClass << Pattern;
- Diag(Pattern->getEndLoc(), diag::note_in_class_initializer_not_yet_parsed);
+ Diag(Pattern->getEndLoc(),
+ diag::note_default_member_initializer_not_yet_parsed);
Instantiation->setInvalidDecl();
return true;
}
@@ -2984,7 +2976,7 @@ bool Sema::InstantiateInClassInitializer(
return true;
if (Inst.isAlreadyInstantiating()) {
// Error out if we hit an instantiation cycle for this initializer.
- Diag(PointOfInstantiation, diag::err_in_class_initializer_cycle)
+ Diag(PointOfInstantiation, diag::err_default_member_initializer_cycle)
<< Instantiation;
return true;
}
@@ -3048,14 +3040,16 @@ bool Sema::usesPartialOrExplicitSpecialization(
/// Get the instantiation pattern to use to instantiate the definition of a
/// given ClassTemplateSpecializationDecl (either the pattern of the primary
/// template or of a partial specialization).
-static CXXRecordDecl *
+static ActionResult<CXXRecordDecl *>
getPatternForClassTemplateSpecialization(
Sema &S, SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
- TemplateSpecializationKind TSK, bool Complain) {
+ TemplateSpecializationKind TSK) {
Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, ClassTemplateSpec);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
- return nullptr;
+ if (Inst.isInvalid())
+ return {/*Invalid=*/true};
+ if (Inst.isAlreadyInstantiating())
+ return {/*Invalid=*/false};
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
@@ -3152,7 +3146,7 @@ getPatternForClassTemplateSpecialization(
<< S.getTemplateArgumentBindingsText(
P->Partial->getTemplateParameters(), *P->Args);
- return nullptr;
+ return {/*Invalid=*/true};
}
}
@@ -3203,14 +3197,15 @@ bool Sema::InstantiateClassTemplateSpecialization(
if (ClassTemplateSpec->isInvalidDecl())
return true;
- CXXRecordDecl *Pattern = getPatternForClassTemplateSpecialization(
- *this, PointOfInstantiation, ClassTemplateSpec, TSK, Complain);
- if (!Pattern)
- return true;
+ ActionResult<CXXRecordDecl *> Pattern =
+ getPatternForClassTemplateSpecialization(*this, PointOfInstantiation,
+ ClassTemplateSpec, TSK);
+ if (!Pattern.isUsable())
+ return Pattern.isInvalid();
- return InstantiateClass(PointOfInstantiation, ClassTemplateSpec, Pattern,
- getTemplateInstantiationArgs(ClassTemplateSpec), TSK,
- Complain);
+ return InstantiateClass(
+ PointOfInstantiation, ClassTemplateSpec, Pattern.get(),
+ getTemplateInstantiationArgs(ClassTemplateSpec), TSK, Complain);
}
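
The tri-state being threaded through here, as a schematic analogue (not clang's actual ActionResult implementation):

    #include <cassert>

    template <typename T> struct TriState {
      T Ptr = nullptr;
      bool Invalid = false;
      bool isInvalid() const { return Invalid; }        // hard error: propagate
      bool isUsable() const { return !Invalid && Ptr; } // pattern available
      T get() const { assert(isUsable()); return Ptr; }
    };
    // null + !Invalid models "already instantiating": the caller returns
    // success without doing anything, instead of reporting a spurious error.
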
/// Instantiates the definitions of all of the member
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 2efb7acb9724..dc1e0ef60cac 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -181,6 +181,22 @@ static void instantiateDependentAllocAlignAttr(
S.AddAllocAlignAttr(New, *Align, Param);
}
+static void instantiateDependentAnnotationAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AnnotateAttr *Attr, Decl *New) {
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ SmallVector<Expr *, 4> Args;
+ Args.reserve(Attr->args_size());
+ for (auto *E : Attr->args()) {
+ ExprResult Result = S.SubstExpr(E, TemplateArgs);
+ if (!Result.isUsable())
+ return;
+ Args.push_back(Result.get());
+ }
+ S.AddAnnotationAttr(New, *Attr, Attr->getAnnotation(), Args);
+}
+
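
What this enables at the source level (the annotate-with-arguments form is a clang extension):

    template <int N>
    [[clang::annotate("range", N, N * 2)]] void work() {}

    template void work<4>(); // instantiation re-evaluates the args: ("range", 4, 8)
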
static Expr *instantiateDependentFunctionAttrCondition(
Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
const Attr *A, Expr *OldCond, const Decl *Tmpl, FunctionDecl *New) {
@@ -417,7 +433,9 @@ static void instantiateOMPDeclareVariantAttr(
if (TI.anyScoreOrCondition(SubstScoreOrConditionExpr))
return;
- // Check function/variant ref.
+ Expr *E = VariantFuncRef.get();
+ // Check function/variant ref for `omp declare variant` but not for `omp
+ // begin declare variant` (which uses implicit attributes).
Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
S.checkOpenMPDeclareVariantFunction(S.ConvertDeclToDeclGroup(New),
VariantFuncRef.get(), TI,
@@ -426,9 +444,42 @@ static void instantiateOMPDeclareVariantAttr(
if (!DeclVarData)
return;
- S.ActOnOpenMPDeclareVariantDirective(DeclVarData.getValue().first,
- DeclVarData.getValue().second, TI,
- Attr.getRange());
+ E = DeclVarData.getValue().second;
+ FD = DeclVarData.getValue().first;
+
+ if (auto *VariantDRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
+ if (auto *VariantFD = dyn_cast<FunctionDecl>(VariantDRE->getDecl())) {
+ if (auto *VariantFTD = VariantFD->getDescribedFunctionTemplate()) {
+ if (!VariantFTD->isThisDeclarationADefinition())
+ return;
+ Sema::TentativeAnalysisScope Trap(S);
+ const TemplateArgumentList *TAL = TemplateArgumentList::CreateCopy(
+ S.Context, TemplateArgs.getInnermost());
+
+ auto *SubstFD = S.InstantiateFunctionDeclaration(VariantFTD, TAL,
+ New->getLocation());
+ if (!SubstFD)
+ return;
+ QualType NewType = S.Context.mergeFunctionTypes(
+ SubstFD->getType(), FD->getType(),
+ /* OfBlockPointer */ false,
+ /* Unqualified */ false, /* AllowCXX */ true);
+ if (NewType.isNull())
+ return;
+ S.InstantiateFunctionDefinition(
+ New->getLocation(), SubstFD, /* Recursive */ true,
+ /* DefinitionRequired */ false, /* AtEndOfTU */ false);
+ SubstFD->setInstantiationIsPending(!SubstFD->isDefined());
+ E = DeclRefExpr::Create(S.Context, NestedNameSpecifierLoc(),
+ SourceLocation(), SubstFD,
+ /* RefersToEnclosingVariableOrCapture */ false,
+ /* NameLoc */ SubstFD->getLocation(),
+ SubstFD->getType(), ExprValueKind::VK_RValue);
+ }
+ }
+ }
+
+ S.ActOnOpenMPDeclareVariantDirective(FD, E, TI, Attr.getRange());
}
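
A schematic of the source pattern this appears to support; the exact match-clause spelling here is an assumption and varies with the OpenMP version:

    template <typename T> void fast(T) {}   // variant is itself a template definition

    #pragma omp declare variant(fast<T>) match(device = {kind(cpu)})
    template <typename T> void base(T) {}

    void use() { base(1); } // instantiating base<int> also instantiates fast<int>
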
static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
@@ -497,12 +548,40 @@ static void instantiateDependentAMDGPUWavesPerEUAttr(
S.addAMDGPUWavesPerEUAttr(New, Attr, MinExpr, MaxExpr);
}
+/// Determine whether the attribute A might be relevant to the declaration D.
+/// If not, we can skip instantiating it. The attribute may or may not have
+/// been instantiated yet.
+static bool isRelevantAttr(Sema &S, const Decl *D, const Attr *A) {
+ // 'preferred_name' is only relevant to the matching specialization of the
+ // template.
+ if (const auto *PNA = dyn_cast<PreferredNameAttr>(A)) {
+ QualType T = PNA->getTypedefType();
+ const auto *RD = cast<CXXRecordDecl>(D);
+ if (!T->isDependentType() && !RD->isDependentContext() &&
+ !declaresSameEntity(T->getAsCXXRecordDecl(), RD))
+ return false;
+ for (const auto *ExistingPNA : D->specific_attrs<PreferredNameAttr>())
+ if (S.Context.hasSameType(ExistingPNA->getTypedefType(),
+ PNA->getTypedefType()))
+ return false;
+ return true;
+ }
+
+ return true;
+}
+
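
The guarded pattern, modeled on how libc++ applies preferred_name to std::string (the names below are illustrative):

    template <typename T> struct basic_str;
    using str = basic_str<char>;

    template <typename T>
    struct [[clang::preferred_name(str)]] basic_str {};

    basic_str<char> a; // matches the attribute's typedef: attribute instantiated
    basic_str<int> b;  // non-matching specialization: attribute skipped
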
void Sema::InstantiateAttrsForDecl(
const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Tmpl,
Decl *New, LateInstantiatedAttrVec *LateAttrs,
LocalInstantiationScope *OuterMostScope) {
if (NamedDecl *ND = dyn_cast<NamedDecl>(New)) {
+ // FIXME: This function is called multiple times for the same template
+ // specialization. We should only instantiate attributes that were added
+ // since the previous instantiation.
for (const auto *TmplAttr : Tmpl->attrs()) {
+ if (!isRelevantAttr(*this, New, TmplAttr))
+ continue;
+
// FIXME: If any of the special case versions from InstantiateAttrs become
// applicable to template declaration, we'll need to add them here.
CXXThisScopeRAII ThisScope(
@@ -511,7 +590,7 @@ void Sema::InstantiateAttrsForDecl(
Attr *NewAttr = sema::instantiateTemplateAttributeForDecl(
TmplAttr, Context, *this, TemplateArgs);
- if (NewAttr)
+ if (NewAttr && isRelevantAttr(*this, New, NewAttr))
New->addAttr(NewAttr);
}
}
@@ -536,6 +615,9 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
LocalInstantiationScope *OuterMostScope) {
for (const auto *TmplAttr : Tmpl->attrs()) {
+ if (!isRelevantAttr(*this, New, TmplAttr))
+ continue;
+
// FIXME: This should be generalized to more than just the AlignedAttr.
const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr);
if (Aligned && Aligned->isAlignmentDependent()) {
@@ -558,6 +640,10 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
+ if (const auto *Annotate = dyn_cast<AnnotateAttr>(TmplAttr)) {
+ instantiateDependentAnnotationAttr(*this, TemplateArgs, Annotate, New);
+ continue;
+ }
if (const auto *EnableIf = dyn_cast<EnableIfAttr>(TmplAttr)) {
instantiateDependentEnableIfAttr(*this, TemplateArgs, EnableIf, Tmpl,
@@ -654,12 +740,32 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
*this, TemplateArgs);
- if (NewAttr)
+ if (NewAttr && isRelevantAttr(*this, New, TmplAttr))
New->addAttr(NewAttr);
}
}
}
+/// In the MS ABI, we need to instantiate default arguments of dllexported
+/// default constructors along with the constructor definition. This allows IR
+/// gen to emit a constructor closure which calls the default constructor with
+/// its default arguments.
+void Sema::InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor) {
+ assert(Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ Ctor->isDefaultConstructor());
+ unsigned NumParams = Ctor->getNumParams();
+ if (NumParams == 0)
+ return;
+ DLLExportAttr *Attr = Ctor->getAttr<DLLExportAttr>();
+ if (!Attr)
+ return;
+ for (unsigned I = 0; I != NumParams; ++I) {
+ (void)CheckCXXDefaultArgExpr(Attr->getLocation(), Ctor,
+ Ctor->getParamDecl(I));
+ DiscardCleanupsInEvaluationContext();
+ }
+}
+
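
The situation the helper handles, assuming an MSVC-targeting build:

    struct __declspec(dllexport) Exported {
      Exported(int seed = 42); // default ctor: the exported closure calls
                               // Exported(42), so the default argument must
                               // be instantiated along with the definition
    };
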
/// Get the previous declaration of a declaration for the purposes of template
/// instantiation. If this finds a previous declaration, then the previous
/// declaration of the instantiation of D should be an instantiation of the
@@ -702,6 +808,11 @@ Decl *TemplateDeclInstantiator::VisitMSGuidDecl(MSGuidDecl *D) {
llvm_unreachable("GUID declaration cannot be instantiated");
}
+Decl *TemplateDeclInstantiator::VisitTemplateParamObjectDecl(
+ TemplateParamObjectDecl *D) {
+ llvm_unreachable("template parameter objects cannot be instantiated");
+}
+
Decl *
TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) {
LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(),
@@ -1911,7 +2022,6 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
D->hasWrittenPrototype(), D->getConstexprKind(),
TrailingRequiresClause);
Function->setRangeEnd(D->getSourceRange().getEnd());
- Function->setUsesFPIntrin(D->usesFPIntrin());
}
if (D->isInlined())
@@ -1981,8 +2091,11 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
}
- if (isFriend)
+ if (isFriend) {
Function->setObjectOfFriendDecl();
+ if (FunctionTemplateDecl *FT = Function->getDescribedFunctionTemplate())
+ FT->setObjectOfFriendDecl();
+ }
if (InitFunctionInstantiation(Function, D))
Function->setInvalidDecl();
@@ -2053,68 +2166,45 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// typedef (C++ [dcl.typedef]p4).
if (Previous.isSingleTagDecl())
Previous.clear();
+
+ // Filter out previous declarations that don't match the scope. The only
+ // effect this has is to remove declarations found in inline namespaces
+ // for friend declarations with unqualified names.
+ SemaRef.FilterLookupForScope(Previous, DC, /*Scope*/ nullptr,
+ /*ConsiderLinkage*/ true,
+ QualifierLoc.hasQualifier());
}
SemaRef.CheckFunctionDeclaration(/*Scope*/ nullptr, Function, Previous,
IsExplicitSpecialization);
- NamedDecl *PrincipalDecl = (TemplateParams
- ? cast<NamedDecl>(FunctionTemplate)
- : Function);
-
- // If the original function was part of a friend declaration,
- // inherit its namespace state and add it to the owner.
- if (isFriend) {
- Function->setObjectOfFriendDecl();
- if (FunctionTemplateDecl *FT = Function->getDescribedFunctionTemplate())
- FT->setObjectOfFriendDecl();
- DC->makeDeclVisibleInContext(PrincipalDecl);
-
- bool QueuedInstantiation = false;
-
- // C++11 [temp.friend]p4 (DR329):
- // When a function is defined in a friend function declaration in a class
- // template, the function is instantiated when the function is odr-used.
- // The same restrictions on multiple declarations and definitions that
- // apply to non-template function declarations and definitions also apply
- // to these implicit definitions.
- if (D->isThisDeclarationADefinition()) {
- SemaRef.CheckForFunctionRedefinition(Function);
- if (!Function->isInvalidDecl()) {
- for (auto R : Function->redecls()) {
- if (R == Function)
- continue;
-
- // If some prior declaration of this function has been used, we need
- // to instantiate its definition.
- if (!QueuedInstantiation && R->isUsed(false)) {
- if (MemberSpecializationInfo *MSInfo =
- Function->getMemberSpecializationInfo()) {
- if (MSInfo->getPointOfInstantiation().isInvalid()) {
- SourceLocation Loc = R->getLocation(); // FIXME
- MSInfo->setPointOfInstantiation(Loc);
- SemaRef.PendingLocalImplicitInstantiations.push_back(
- std::make_pair(Function, Loc));
- QueuedInstantiation = true;
- }
- }
- }
- }
+ // Check the template parameter list against the previous declaration. The
+ // goal here is to pick up default arguments added since the friend was
+ // declared; we know the template parameter lists match, since otherwise
+ // we would not have picked this template as the previous declaration.
+ if (isFriend && TemplateParams && FunctionTemplate->getPreviousDecl()) {
+ SemaRef.CheckTemplateParameterList(
+ TemplateParams,
+ FunctionTemplate->getPreviousDecl()->getTemplateParameters(),
+ Function->isThisDeclarationADefinition()
+ ? Sema::TPC_FriendFunctionTemplateDefinition
+ : Sema::TPC_FriendFunctionTemplate);
+ }
+
+ // If we're introducing a friend definition after the first use, trigger
+ // instantiation.
+ // FIXME: If this is a friend function template definition, we should check
+ // to see if any specializations have been used.
+ if (isFriend && D->isThisDeclarationADefinition() && Function->isUsed(false)) {
+ if (MemberSpecializationInfo *MSInfo =
+ Function->getMemberSpecializationInfo()) {
+ if (MSInfo->getPointOfInstantiation().isInvalid()) {
+ SourceLocation Loc = D->getLocation(); // FIXME
+ MSInfo->setPointOfInstantiation(Loc);
+ SemaRef.PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Function, Loc));
}
}
-
- // Check the template parameter list against the previous declaration. The
- // goal here is to pick up default arguments added since the friend was
- // declared; we know the template parameter lists match, since otherwise
- // we would not have picked this template as the previous declaration.
- if (TemplateParams && FunctionTemplate->getPreviousDecl()) {
- SemaRef.CheckTemplateParameterList(
- TemplateParams,
- FunctionTemplate->getPreviousDecl()->getTemplateParameters(),
- Function->isThisDeclarationADefinition()
- ? Sema::TPC_FriendFunctionTemplateDefinition
- : Sema::TPC_FriendFunctionTemplate);
- }
}
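
The friend machinery above is exercised by definitions injected from class templates, e.g.:

    template <typename T> struct Holder {
      friend int get(Holder) { return 1; } // a new get(Holder<T>) per instantiation
    };

    int use() {
      Holder<int> h;
      return get(h); // found by ADL; the friend body is instantiated on first use
    }
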
if (D->isExplicitlyDefaulted()) {
@@ -2124,7 +2214,13 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
if (D->isDeleted())
SemaRef.SetDeclDeleted(Function, D->getLocation());
- if (Function->isLocalExternDecl() && !Function->getPreviousDecl())
+ NamedDecl *PrincipalDecl =
+ (TemplateParams ? cast<NamedDecl>(FunctionTemplate) : Function);
+
+ // If this declaration lives in a different context from its lexical context,
+ // add it to the corresponding lookup table.
+ if (isFriend ||
+ (Function->isLocalExternDecl() && !Function->getPreviousDecl()))
DC->makeDeclVisibleInContext(PrincipalDecl);
if (Function->isOverloadedOperator() && !DC->isRecord() &&
@@ -2879,7 +2975,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
if (!TName.isNull())
Param->setDefaultArgument(
SemaRef.Context,
- TemplateArgumentLoc(TemplateArgument(TName),
+ TemplateArgumentLoc(SemaRef.Context, TemplateArgument(TName),
D->getDefaultArgument().getTemplateQualifierLoc(),
D->getDefaultArgument().getTemplateNameLoc()));
}
@@ -3326,67 +3422,58 @@ TemplateDeclInstantiator::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
SemaRef.CurrentInstantiationScope->findInstantiationOf(PrevDeclInScope)
->get<Decl *>());
}
- OMPDeclareMapperDecl *NewDMD = SemaRef.ActOnOpenMPDeclareMapperDirectiveStart(
- /*S=*/nullptr, Owner, D->getDeclName(), SubstMapperTy, D->getLocation(),
- VN, D->getAccess(), PrevDeclInScope);
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewDMD);
- SmallVector<OMPClause *, 6> Clauses;
bool IsCorrect = true;
- if (!RequiresInstantiation) {
- // Copy the mapper variable.
- NewDMD->setMapperVarRef(D->getMapperVarRef());
- // Copy map clauses from the original mapper.
- for (OMPClause *C : D->clauselists())
- Clauses.push_back(C);
- } else {
- // Instantiate the mapper variable.
- DeclarationNameInfo DirName;
- SemaRef.StartOpenMPDSABlock(llvm::omp::OMPD_declare_mapper, DirName,
- /*S=*/nullptr,
- (*D->clauselist_begin())->getBeginLoc());
- SemaRef.ActOnOpenMPDeclareMapperDirectiveVarDecl(
- NewDMD, /*S=*/nullptr, SubstMapperTy, D->getLocation(), VN);
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(
- cast<DeclRefExpr>(D->getMapperVarRef())->getDecl(),
- cast<DeclRefExpr>(NewDMD->getMapperVarRef())->getDecl());
- auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(Owner);
- Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, Qualifiers(),
- ThisContext);
- // Instantiate map clauses.
- for (OMPClause *C : D->clauselists()) {
- auto *OldC = cast<OMPMapClause>(C);
- SmallVector<Expr *, 4> NewVars;
- for (Expr *OE : OldC->varlists()) {
- Expr *NE = SemaRef.SubstExpr(OE, TemplateArgs).get();
- if (!NE) {
- IsCorrect = false;
- break;
- }
- NewVars.push_back(NE);
- }
- if (!IsCorrect)
+ SmallVector<OMPClause *, 6> Clauses;
+ // Instantiate the mapper variable.
+ DeclarationNameInfo DirName;
+ SemaRef.StartOpenMPDSABlock(llvm::omp::OMPD_declare_mapper, DirName,
+ /*S=*/nullptr,
+ (*D->clauselist_begin())->getBeginLoc());
+ ExprResult MapperVarRef = SemaRef.ActOnOpenMPDeclareMapperDirectiveVarDecl(
+ /*S=*/nullptr, SubstMapperTy, D->getLocation(), VN);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(
+ cast<DeclRefExpr>(D->getMapperVarRef())->getDecl(),
+ cast<DeclRefExpr>(MapperVarRef.get())->getDecl());
+ auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(Owner);
+ Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, Qualifiers(),
+ ThisContext);
+ // Instantiate map clauses.
+ for (OMPClause *C : D->clauselists()) {
+ auto *OldC = cast<OMPMapClause>(C);
+ SmallVector<Expr *, 4> NewVars;
+ for (Expr *OE : OldC->varlists()) {
+ Expr *NE = SemaRef.SubstExpr(OE, TemplateArgs).get();
+ if (!NE) {
+ IsCorrect = false;
break;
- NestedNameSpecifierLoc NewQualifierLoc =
- SemaRef.SubstNestedNameSpecifierLoc(OldC->getMapperQualifierLoc(),
- TemplateArgs);
- CXXScopeSpec SS;
- SS.Adopt(NewQualifierLoc);
- DeclarationNameInfo NewNameInfo = SemaRef.SubstDeclarationNameInfo(
- OldC->getMapperIdInfo(), TemplateArgs);
- OMPVarListLocTy Locs(OldC->getBeginLoc(), OldC->getLParenLoc(),
- OldC->getEndLoc());
- OMPClause *NewC = SemaRef.ActOnOpenMPMapClause(
- OldC->getMapTypeModifiers(), OldC->getMapTypeModifiersLoc(), SS,
- NewNameInfo, OldC->getMapType(), OldC->isImplicitMapType(),
- OldC->getMapLoc(), OldC->getColonLoc(), NewVars, Locs);
- Clauses.push_back(NewC);
+ }
+ NewVars.push_back(NE);
}
- SemaRef.EndOpenMPDSABlock(nullptr);
- }
- (void)SemaRef.ActOnOpenMPDeclareMapperDirectiveEnd(NewDMD, /*S=*/nullptr,
- Clauses);
+ if (!IsCorrect)
+ break;
+ NestedNameSpecifierLoc NewQualifierLoc =
+ SemaRef.SubstNestedNameSpecifierLoc(OldC->getMapperQualifierLoc(),
+ TemplateArgs);
+ CXXScopeSpec SS;
+ SS.Adopt(NewQualifierLoc);
+ DeclarationNameInfo NewNameInfo =
+ SemaRef.SubstDeclarationNameInfo(OldC->getMapperIdInfo(), TemplateArgs);
+ OMPVarListLocTy Locs(OldC->getBeginLoc(), OldC->getLParenLoc(),
+ OldC->getEndLoc());
+ OMPClause *NewC = SemaRef.ActOnOpenMPMapClause(
+ OldC->getMapTypeModifiers(), OldC->getMapTypeModifiersLoc(), SS,
+ NewNameInfo, OldC->getMapType(), OldC->isImplicitMapType(),
+ OldC->getMapLoc(), OldC->getColonLoc(), NewVars, Locs);
+ Clauses.push_back(NewC);
+ }
+ SemaRef.EndOpenMPDSABlock(nullptr);
if (!IsCorrect)
return nullptr;
+ Sema::DeclGroupPtrTy DG = SemaRef.ActOnOpenMPDeclareMapperDirective(
+ /*S=*/nullptr, Owner, D->getDeclName(), SubstMapperTy, D->getLocation(),
+ VN, D->getAccess(), MapperVarRef.get(), Clauses, PrevDeclInScope);
+ Decl *NewDMD = DG.get().getSingleDecl();
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewDMD);
return NewDMD;
}
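
The OpenMP construct whose template instantiation this rewrite drives; inside a class template, the mapped type and the map clauses below are what would be substituted:

    struct Vec { int len; double *data; };
    #pragma omp declare mapper(custom : Vec v) map(v.len, v.data[0 : v.len])
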
@@ -3595,11 +3682,11 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
return nullptr;
return VisitVarTemplateSpecializationDecl(
- InstVarTemplate, D, InsertPos, VarTemplateArgsInfo, Converted, PrevDecl);
+ InstVarTemplate, D, VarTemplateArgsInfo, Converted, PrevDecl);
}
Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
- VarTemplateDecl *VarTemplate, VarDecl *D, void *InsertPos,
+ VarTemplateDecl *VarTemplate, VarDecl *D,
const TemplateArgumentListInfo &TemplateArgsInfo,
ArrayRef<TemplateArgument> Converted,
VarTemplateSpecializationDecl *PrevDecl) {
@@ -3622,8 +3709,11 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
SemaRef.Context, Owner, D->getInnerLocStart(), D->getLocation(),
VarTemplate, DI->getType(), DI, D->getStorageClass(), Converted);
Var->setTemplateArgsInfo(TemplateArgsInfo);
- if (InsertPos)
+ if (!PrevDecl) {
+ void *InsertPos = nullptr;
+ VarTemplate->findSpecialization(Converted, InsertPos);
VarTemplate->AddSpecialization(Var, InsertPos);
+ }
if (SemaRef.getLangOpts().OpenCL)
SemaRef.deduceOpenCLAddressSpace(Var);
@@ -4099,6 +4189,9 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc.getNumParams();
OldIdx != NumOldParams; ++OldIdx) {
ParmVarDecl *OldParam = OldProtoLoc.getParam(OldIdx);
+ if (!OldParam)
+ return nullptr;
+
LocalInstantiationScope *Scope = SemaRef.CurrentInstantiationScope;
Optional<unsigned> NumArgumentsInExpansion;
@@ -4431,6 +4524,8 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
// into a template instantiation for this specific function template
// specialization, which is not a SFINAE context, so that we diagnose any
// further errors in the declaration itself.
+ //
+ // FIXME: This is a hack.
typedef Sema::CodeSynthesisContext ActiveInstType;
ActiveInstType &ActiveInst = SemaRef.CodeSynthesisContexts.back();
if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
@@ -4440,6 +4535,8 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
assert(FunTmpl->getTemplatedDecl() == Tmpl &&
"Deduction from the wrong function template?");
(void) FunTmpl;
+ SemaRef.InstantiatingSpecializations.erase(
+ {ActiveInst.Entity->getCanonicalDecl(), ActiveInst.Kind});
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
ActiveInst.Entity = New;
@@ -4569,27 +4666,6 @@ Sema::InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
return cast_or_null<FunctionDecl>(SubstDecl(FD, FD->getParent(), MArgs));
}
-/// In the MS ABI, we need to instantiate default arguments of dllexported
-/// default constructors along with the constructor definition. This allows IR
-/// gen to emit a constructor closure which calls the default constructor with
-/// its default arguments.
-static void InstantiateDefaultCtorDefaultArgs(Sema &S,
- CXXConstructorDecl *Ctor) {
- assert(S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
- Ctor->isDefaultConstructor());
- unsigned NumParams = Ctor->getNumParams();
- if (NumParams == 0)
- return;
- DLLExportAttr *Attr = Ctor->getAttr<DLLExportAttr>();
- if (!Attr)
- return;
- for (unsigned I = 0; I != NumParams; ++I) {
- (void)S.CheckCXXDefaultArgExpr(Attr->getLocation(), Ctor,
- Ctor->getParamDecl(I));
- S.DiscardCleanupsInEvaluationContext();
- }
-}
-
/// Instantiate the definition of the given function from its
/// template.
///
@@ -4612,8 +4688,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
bool Recursive,
bool DefinitionRequired,
bool AtEndOfTU) {
- if (Function->isInvalidDecl() || Function->isDefined() ||
- isa<CXXDeductionGuideDecl>(Function))
+ if (Function->isInvalidDecl() || isa<CXXDeductionGuideDecl>(Function))
return;
// Never instantiate an explicit specialization except if it is a class scope
@@ -4623,6 +4698,20 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (TSK == TSK_ExplicitSpecialization)
return;
+ // Don't instantiate a definition if we already have one.
+ const FunctionDecl *ExistingDefn = nullptr;
+ if (Function->isDefined(ExistingDefn,
+ /*CheckForPendingFriendDefinition=*/true)) {
+ if (ExistingDefn->isThisDeclarationADefinition())
+ return;
+
+ // If we're asked to instantiate a function whose body comes from an
+ // instantiated friend declaration, attach the instantiated body to the
+ // corresponding declaration of the function.
+ assert(ExistingDefn->isThisDeclarationInstantiatedFromAFriendDefinition());
+ Function = const_cast<FunctionDecl*>(ExistingDefn);
+ }
+
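
A hedged sketch of the scenario described in the comment above:

    template <typename T> struct B {
      friend void h(B) {} // pending friend definition, instantiated with B<int>
    };
    void h(B<int>);       // prior declaration: the instantiated body attaches here
    void call() { h(B<int>{}); }
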
// Find the function body that we'll be substituting.
const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern();
assert(PatternDecl && "instantiating a non-template");
@@ -4795,7 +4884,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// default arguments.
if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
Ctor->isDefaultConstructor()) {
- InstantiateDefaultCtorDefaultArgs(*this, Ctor);
+ InstantiateDefaultCtorDefaultArgs(Ctor);
}
}
@@ -4832,7 +4921,7 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
- SourceLocation PointOfInstantiation, void *InsertPos,
+ SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs,
LocalInstantiationScope *StartingScope) {
if (FromVar->isInvalidDecl())
@@ -4871,7 +4960,7 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
return cast_or_null<VarTemplateSpecializationDecl>(
Instantiator.VisitVarTemplateSpecializationDecl(
- VarTemplate, FromVar, InsertPos, TemplateArgsInfo, Converted));
+ VarTemplate, FromVar, TemplateArgsInfo, Converted));
}
/// Instantiates a variable template specialization by completing it
@@ -5143,15 +5232,6 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarTemplateSpecializationDecl *VarSpec =
dyn_cast<VarTemplateSpecializationDecl>(Var);
if (VarSpec) {
- // If this is a variable template specialization, make sure that it is
- // non-dependent.
- bool InstantiationDependent = false;
- assert(!TemplateSpecializationType::anyDependentTemplateArguments(
- VarSpec->getTemplateArgsInfo(), InstantiationDependent) &&
- "Only instantiate variable template specializations that are "
- "not type-dependent");
- (void)InstantiationDependent;
-
// If this is a static data member template, there might be an
// uninstantiated initializer on the declaration. If so, instantiate
// it now.
@@ -5303,8 +5383,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
TemplateDeclInstantiator Instantiator(*this, Var->getDeclContext(),
TemplateArgs);
Var = cast_or_null<VarDecl>(Instantiator.VisitVarTemplateSpecializationDecl(
- VarSpec->getSpecializedTemplate(), Def, nullptr,
- VarSpec->getTemplateArgsInfo(), VarSpec->getTemplateArgs().asArray()));
+ VarSpec->getSpecializedTemplate(), Def, VarSpec->getTemplateArgsInfo(),
+ VarSpec->getTemplateArgs().asArray(), VarSpec));
if (Var) {
llvm::PointerUnion<VarTemplateDecl *,
VarTemplatePartialSpecializationDecl *> PatternPtr =
@@ -5314,12 +5394,6 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
cast<VarTemplateSpecializationDecl>(Var)->setInstantiationOf(
Partial, &VarSpec->getTemplateInstantiationArgs());
- // Merge the definition with the declaration.
- LookupResult R(*this, Var->getDeclName(), Var->getLocation(),
- LookupOrdinaryName, forRedeclarationInCurContext());
- R.addDecl(OldVar);
- MergeVarDecl(Var, R);
-
// Attach the initializer.
InstantiateVariableInitializer(Var, Def, TemplateArgs);
}
@@ -5972,16 +6046,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
return nullptr;
DeclContext::lookup_result Found = ParentDC->lookup(Name);
- if (auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(D)) {
- VarTemplateDecl *Templ = cast_or_null<VarTemplateDecl>(
- findInstantiationOf(Context, VTSD->getSpecializedTemplate(),
- Found.begin(), Found.end()));
- if (!Templ)
- return nullptr;
- Result = getVarTemplateSpecialization(
- Templ, &VTSD->getTemplateArgsInfo(), NewNameInfo, SourceLocation());
- } else
- Result = findInstantiationOf(Context, D, Found.begin(), Found.end());
+ Result = findInstantiationOf(Context, D, Found.begin(), Found.end());
} else {
// Since we don't have a name for the entity we're looking for,
// our only option is to walk through all of the declarations to
@@ -5999,7 +6064,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
if (!Result) {
if (isa<UsingShadowDecl>(D)) {
// UsingShadowDecls can instantiate to nothing because of using hiding.
- } else if (Diags.hasUncompilableErrorOccurred()) {
+ } else if (hasUncompilableErrorOccurred()) {
// We've already complained about some ill-formed code, so most likely
// this declaration failed to instantiate. There's no point in
// complaining further, since this is normal in invalid code.
diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp
index 7b77d1cb482a..1951aec3d17d 100644
--- a/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -368,8 +368,8 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
Locations.push_back(Unexpanded[I].second);
}
- DiagnosticBuilder DB = Diag(Loc, diag::err_unexpanded_parameter_pack)
- << (int)UPPC << (int)Names.size();
+ auto DB = Diag(Loc, diag::err_unexpanded_parameter_pack)
+ << (int)UPPC << (int)Names.size();
for (size_t I = 0, E = std::min(Names.size(), (size_t)2); I != E; ++I)
DB << Names[I];
@@ -408,6 +408,29 @@ bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
return DiagnoseUnexpandedParameterPacks(E->getBeginLoc(), UPPC, Unexpanded);
}
+bool Sema::DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE) {
+ if (!RE->containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(RE);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+
+ // We only care about unexpanded references to the RequiresExpr's own
+ // parameter packs.
+ auto Parms = RE->getLocalParameters();
+ llvm::SmallPtrSet<NamedDecl*, 8> ParmSet(Parms.begin(), Parms.end());
+ SmallVector<UnexpandedParameterPack, 2> UnexpandedParms;
+ for (auto Parm : Unexpanded)
+ if (ParmSet.contains(Parm.first.dyn_cast<NamedDecl*>()))
+ UnexpandedParms.push_back(Parm);
+ if (UnexpandedParms.empty())
+ return false;
+
+ return DiagnoseUnexpandedParameterPacks(RE->getBeginLoc(), UPPC_Requirement,
+ UnexpandedParms);
+}
+
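
The rule being enforced: a pack local to a requires-expression must be fully expanded within it. For example:

    template <typename... Ts>
    concept Summable = requires(Ts... ts) {
      (ts + ...); // OK: expanded inside the requires-expression
      // ts;      // would be diagnosed: unexpanded pack local to this expression
    };
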
bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC) {
// C++0x [temp.variadic]p5:
@@ -614,7 +637,8 @@ QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
return QualType();
}
- return Context.getPackExpansionType(Pattern, NumExpansions);
+ return Context.getPackExpansionType(Pattern, NumExpansions,
+ /*ExpectPackInType=*/false);
}
ExprResult Sema::ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc) {
@@ -1071,7 +1095,7 @@ Sema::getTemplateArgumentPackExpansionPattern(
case TemplateArgument::TemplateExpansion:
Ellipsis = OrigLoc.getTemplateEllipsisLoc();
NumExpansions = Argument.getNumTemplateExpansions();
- return TemplateArgumentLoc(Argument.getPackExpansionPattern(),
+ return TemplateArgumentLoc(Context, Argument.getPackExpansionPattern(),
OrigLoc.getTemplateQualifierLoc(),
OrigLoc.getTemplateNameLoc());
@@ -1159,7 +1183,7 @@ static void CheckFoldOperand(Sema &S, Expr *E) {
}
}
-ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ExprResult Sema::ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc) {
@@ -1201,18 +1225,37 @@ ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
}
BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator);
- return BuildCXXFoldExpr(LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc,
+
+ // Perform first-phase name lookup now.
+ UnresolvedLookupExpr *ULE = nullptr;
+ {
+ UnresolvedSet<16> Functions;
+ LookupBinOp(S, EllipsisLoc, Opc, Functions);
+ if (!Functions.empty()) {
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(
+ BinaryOperator::getOverloadedOperator(Opc));
+ ExprResult Callee = CreateUnresolvedLookupExpr(
+ /*NamingClass*/ nullptr, NestedNameSpecifierLoc(),
+ DeclarationNameInfo(OpName, EllipsisLoc), Functions);
+ if (Callee.isInvalid())
+ return ExprError();
+ ULE = cast<UnresolvedLookupExpr>(Callee.get());
+ }
+ }
+
+ return BuildCXXFoldExpr(ULE, LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc,
None);
}
-ExprResult Sema::BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ExprResult Sema::BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
+ SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions) {
- return new (Context) CXXFoldExpr(Context.DependentTy, LParenLoc, LHS,
- Operator, EllipsisLoc, RHS, RParenLoc,
- NumExpansions);
+ return new (Context)
+ CXXFoldExpr(Context.DependentTy, Callee, LParenLoc, LHS, Operator,
+ EllipsisLoc, RHS, RParenLoc, NumExpansions);
}
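
The effect is two-phase name lookup for the operator in a fold-expression: candidates visible at the template definition are captured in the CXXFoldExpr and replayed at instantiation. For example:

    namespace N {
      struct X { int v; };
      X operator+(X a, X b) { return {a.v + b.v}; }
    }

    // First-phase lookup for operator+ happens here and is recorded.
    template <typename... Ts> auto sum(Ts... ts) { return (ts + ...); }

    N::X r = sum(N::X{1}, N::X{2});
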
ExprResult Sema::BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index b8f7f1a58159..4178024d1264 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -37,6 +37,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/ErrorHandling.h"
+#include <bitset>
using namespace clang;
@@ -49,7 +50,7 @@ enum TypeDiagSelector {
/// isOmittedBlockReturnType - Return true if this declarator is missing a
/// return type because this is an omitted return type on a block literal.
static bool isOmittedBlockReturnType(const Declarator &D) {
- if (D.getContext() != DeclaratorContext::BlockLiteralContext ||
+ if (D.getContext() != DeclaratorContext::BlockLiteral ||
D.getDeclSpec().hasTypeSpecifier())
return false;
@@ -146,6 +147,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
#define NULLABILITY_TYPE_ATTRS_CASELIST \
case ParsedAttr::AT_TypeNonNull: \
case ParsedAttr::AT_TypeNullable: \
+ case ParsedAttr::AT_TypeNullableResult: \
case ParsedAttr::AT_TypeNullUnspecified
namespace {
@@ -1299,27 +1301,27 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.VoidTy;
break;
case DeclSpec::TST_char:
- if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ if (DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified)
Result = Context.CharTy;
- else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed)
+ else if (DS.getTypeSpecSign() == TypeSpecifierSign::Signed)
Result = Context.SignedCharTy;
else {
- assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
+ assert(DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned &&
"Unknown TSS value");
Result = Context.UnsignedCharTy;
}
break;
case DeclSpec::TST_wchar:
- if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ if (DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified)
Result = Context.WCharTy;
- else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
+ else if (DS.getTypeSpecSign() == TypeSpecifierSign::Signed) {
S.Diag(DS.getTypeSpecSignLoc(), diag::ext_wchar_t_sign_spec)
<< DS.getSpecifierName(DS.getTypeSpecType(),
Context.getPrintingPolicy());
Result = Context.getSignedWCharType();
} else {
- assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
- "Unknown TSS value");
+ assert(DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned &&
+ "Unknown TSS value");
S.Diag(DS.getTypeSpecSignLoc(), diag::ext_wchar_t_sign_spec)
<< DS.getSpecifierName(DS.getTypeSpecType(),
Context.getPrintingPolicy());
@@ -1327,19 +1329,19 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
case DeclSpec::TST_char8:
- assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
- "Unknown TSS value");
- Result = Context.Char8Ty;
+ assert(DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char8Ty;
break;
case DeclSpec::TST_char16:
- assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
- "Unknown TSS value");
- Result = Context.Char16Ty;
+ assert(DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char16Ty;
break;
case DeclSpec::TST_char32:
- assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
- "Unknown TSS value");
- Result = Context.Char32Ty;
+ assert(DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char32Ty;
break;
case DeclSpec::TST_unspecified:
// If this is a missing declspec in a block literal return context, then it
@@ -1347,12 +1349,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// The declspec is always missing in a lambda expr context; it is either
// specified with a trailing return type or inferred.
if (S.getLangOpts().CPlusPlus14 &&
- declarator.getContext() == DeclaratorContext::LambdaExprContext) {
+ declarator.getContext() == DeclaratorContext::LambdaExpr) {
// In C++1y, a lambda's implicit return type is 'auto'.
Result = Context.getAutoDeductType();
break;
- } else if (declarator.getContext() ==
- DeclaratorContext::LambdaExprContext ||
+ } else if (declarator.getContext() == DeclaratorContext::LambdaExpr ||
checkOmittedBlockReturnType(S, declarator,
Context.DependentTy)) {
Result = Context.DependentTy;
@@ -1401,12 +1402,18 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
LLVM_FALLTHROUGH;
case DeclSpec::TST_int: {
- if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
+ if (DS.getTypeSpecSign() != TypeSpecifierSign::Unsigned) {
switch (DS.getTypeSpecWidth()) {
- case DeclSpec::TSW_unspecified: Result = Context.IntTy; break;
- case DeclSpec::TSW_short: Result = Context.ShortTy; break;
- case DeclSpec::TSW_long: Result = Context.LongTy; break;
- case DeclSpec::TSW_longlong:
+ case TypeSpecifierWidth::Unspecified:
+ Result = Context.IntTy;
+ break;
+ case TypeSpecifierWidth::Short:
+ Result = Context.ShortTy;
+ break;
+ case TypeSpecifierWidth::Long:
+ Result = Context.LongTy;
+ break;
+ case TypeSpecifierWidth::LongLong:
Result = Context.LongLongTy;
// 'long long' is a C99 or C++11 feature.
@@ -1422,10 +1429,16 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
} else {
switch (DS.getTypeSpecWidth()) {
- case DeclSpec::TSW_unspecified: Result = Context.UnsignedIntTy; break;
- case DeclSpec::TSW_short: Result = Context.UnsignedShortTy; break;
- case DeclSpec::TSW_long: Result = Context.UnsignedLongTy; break;
- case DeclSpec::TSW_longlong:
+ case TypeSpecifierWidth::Unspecified:
+ Result = Context.UnsignedIntTy;
+ break;
+ case TypeSpecifierWidth::Short:
+ Result = Context.UnsignedShortTy;
+ break;
+ case TypeSpecifierWidth::Long:
+ Result = Context.UnsignedLongTy;
+ break;
+ case TypeSpecifierWidth::LongLong:
Result = Context.UnsignedLongLongTy;
// 'long long' is a C99 or C++11 feature.
@@ -1446,8 +1459,9 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
if (!S.Context.getTargetInfo().hasExtIntType())
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "_ExtInt";
- Result = S.BuildExtIntType(DS.getTypeSpecSign() == TSS_unsigned,
- DS.getRepAsExpr(), DS.getBeginLoc());
+ Result =
+ S.BuildExtIntType(DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned,
+ DS.getRepAsExpr(), DS.getBeginLoc());
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
@@ -1456,20 +1470,20 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
case DeclSpec::TST_accum: {
switch (DS.getTypeSpecWidth()) {
- case DeclSpec::TSW_short:
- Result = Context.ShortAccumTy;
- break;
- case DeclSpec::TSW_unspecified:
- Result = Context.AccumTy;
- break;
- case DeclSpec::TSW_long:
- Result = Context.LongAccumTy;
- break;
- case DeclSpec::TSW_longlong:
- llvm_unreachable("Unable to specify long long as _Accum width");
+ case TypeSpecifierWidth::Short:
+ Result = Context.ShortAccumTy;
+ break;
+ case TypeSpecifierWidth::Unspecified:
+ Result = Context.AccumTy;
+ break;
+ case TypeSpecifierWidth::Long:
+ Result = Context.LongAccumTy;
+ break;
+ case TypeSpecifierWidth::LongLong:
+ llvm_unreachable("Unable to specify long long as _Accum width");
}
- if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ if (DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned)
Result = Context.getCorrespondingUnsignedType(Result);
if (DS.isTypeSpecSat())
@@ -1479,20 +1493,20 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
case DeclSpec::TST_fract: {
switch (DS.getTypeSpecWidth()) {
- case DeclSpec::TSW_short:
- Result = Context.ShortFractTy;
- break;
- case DeclSpec::TSW_unspecified:
- Result = Context.FractTy;
- break;
- case DeclSpec::TSW_long:
- Result = Context.LongFractTy;
- break;
- case DeclSpec::TSW_longlong:
- llvm_unreachable("Unable to specify long long as _Fract width");
+ case TypeSpecifierWidth::Short:
+ Result = Context.ShortFractTy;
+ break;
+ case TypeSpecifierWidth::Unspecified:
+ Result = Context.FractTy;
+ break;
+ case TypeSpecifierWidth::Long:
+ Result = Context.LongFractTy;
+ break;
+ case TypeSpecifierWidth::LongLong:
+ llvm_unreachable("Unable to specify long long as _Fract width");
}
- if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ if (DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned)
Result = Context.getCorrespondingUnsignedType(Result);
if (DS.isTypeSpecSat())
@@ -1502,10 +1516,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
case DeclSpec::TST_int128:
if (!S.Context.getTargetInfo().hasInt128Type() &&
+ !S.getLangOpts().SYCLIsDevice &&
!(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__int128";
- if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ if (DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned)
Result = Context.UnsignedInt128Ty;
else
Result = Context.Int128Ty;
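The added SYCLIsDevice check joins the existing OpenMP-device exemption: a device compilation may mention `__int128` even when the host target lacks it. A hedged sketch:

    __int128 big;            // err_type_unsupported unless the target has a 128-bit
    unsigned __int128 ubig;  // integer type, or this is an OpenMP/SYCL device compile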
@@ -1529,7 +1544,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
case DeclSpec::TST_double:
- if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
+ if (DS.getTypeSpecWidth() == TypeSpecifierWidth::Long)
Result = Context.LongDoubleTy;
else
Result = Context.DoubleTy;
@@ -1542,7 +1557,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
<< "__float128";
Result = Context.Float128Ty;
break;
- case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool
+ case DeclSpec::TST_bool:
+ Result = Context.BoolTy; // _Bool or bool
break;
case DeclSpec::TST_decimal32: // _Decimal32
case DeclSpec::TST_decimal64: // _Decimal64
@@ -1567,8 +1583,10 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// If the type is deprecated or unavailable, diagnose it.
S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeNameLoc());
- assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
- DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!");
+ assert(DS.getTypeSpecWidth() == TypeSpecifierWidth::Unspecified &&
+ DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
+ "No qualifiers on tag names!");
// TypeQuals handled by caller.
Result = Context.getTypeDeclType(D);
@@ -1581,8 +1599,9 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
}
case DeclSpec::TST_typename: {
- assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
- DS.getTypeSpecSign() == 0 &&
+ assert(DS.getTypeSpecWidth() == TypeSpecifierWidth::Unspecified &&
+ DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
"Can't handle qualifiers on typedef names yet!");
Result = S.GetTypeFromParser(DS.getRepAsType());
if (Result.isNull()) {
@@ -1739,7 +1758,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// Before we process any type attributes, synthesize a block literal
// function declarator if necessary.
- if (declarator.getContext() == DeclaratorContext::BlockLiteralContext)
+ if (declarator.getContext() == DeclaratorContext::BlockLiteral)
maybeSynthesizeBlockSignature(state, Result);
// Apply any type attributes from the decl spec. This may cause the
@@ -2070,7 +2089,8 @@ QualType Sema::BuildPointerType(QualType T,
return QualType();
}
- if (T->isFunctionType() && getLangOpts().OpenCL) {
+ if (T->isFunctionType() && getLangOpts().OpenCL &&
+ !getOpenCLOptions().isEnabled("__cl_clang_function_pointers")) {
Diag(Loc, diag::err_opencl_function_pointer);
return QualType();
}
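With the new condition, the OpenCL function-pointer error becomes suppressible through the named Clang extension; a sketch in OpenCL C, assuming the usual extension pragma spelling:

    #pragma OPENCL EXTENSION __cl_clang_function_pointers : enable
    void (*fp)(void);  // allowed; without the pragma: err_opencl_function_pointer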
@@ -2194,7 +2214,8 @@ QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
return Context.getDependentExtIntType(IsUnsigned, BitWidth);
llvm::APSInt Bits(32);
- ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Bits);
+ ExprResult ICE =
+ VerifyIntegerConstantExpression(BitWidth, &Bits, /*FIXME*/ AllowFold);
if (ICE.isInvalid())
return QualType();
@@ -2219,26 +2240,62 @@ QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
return Context.getExtIntType(IsUnsigned, NumBits);
}
-/// Check whether the specified array size makes the array type a VLA. If so,
-/// return true, if not, return the size of the array in SizeVal.
-static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
+/// Check whether the specified array bound can be evaluated using the relevant
+/// language rules. If so, returns the possibly-converted expression and sets
+/// SizeVal to the size. If not, but the expression might be a VLA bound,
+/// returns ExprResult(). Otherwise, produces a diagnostic and returns
+/// ExprError().
+static ExprResult checkArraySize(Sema &S, Expr *&ArraySize,
+ llvm::APSInt &SizeVal, unsigned VLADiag,
+ bool VLAIsError) {
+ if (S.getLangOpts().CPlusPlus14 &&
+ (VLAIsError ||
+ !ArraySize->getType()->isIntegralOrUnscopedEnumerationType())) {
+ // C++14 [dcl.array]p1:
+ // The constant-expression shall be a converted constant expression of
+ // type std::size_t.
+ //
+ // Don't apply this rule if we might be forming a VLA: in that case, we
+ // allow non-constant expressions and constant-folding. We only need to use
+ // the converted constant expression rules (to properly convert the source)
+ // when the source expression is of class type.
+ return S.CheckConvertedConstantExpression(
+ ArraySize, S.Context.getSizeType(), SizeVal, Sema::CCEK_ArrayBound);
+ }
+
// If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode
// (like gnu99, but not c99) accept any evaluatable value as an extension.
class VLADiagnoser : public Sema::VerifyICEDiagnoser {
public:
- VLADiagnoser() : Sema::VerifyICEDiagnoser(true) {}
+ unsigned VLADiag;
+ bool VLAIsError;
+ bool IsVLA = false;
+
+ VLADiagnoser(unsigned VLADiag, bool VLAIsError)
+ : VLADiag(VLADiag), VLAIsError(VLAIsError) {}
- void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
+ Sema::SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_array_size_non_int) << T;
}
- void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) override {
- S.Diag(Loc, diag::ext_vla_folded_to_constant) << SR;
+ Sema::SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
+ SourceLocation Loc) override {
+ IsVLA = !VLAIsError;
+ return S.Diag(Loc, VLADiag);
}
- } Diagnoser;
- return S.VerifyIntegerConstantExpression(ArraySize, &SizeVal, Diagnoser,
- S.LangOpts.GNUMode ||
- S.LangOpts.OpenCL).isInvalid();
+ Sema::SemaDiagnosticBuilder diagnoseFold(Sema &S,
+ SourceLocation Loc) override {
+ return S.Diag(Loc, diag::ext_vla_folded_to_constant);
+ }
+ } Diagnoser(VLADiag, VLAIsError);
+
+ ExprResult R =
+ S.VerifyIntegerConstantExpression(ArraySize, &SizeVal, Diagnoser);
+ if (Diagnoser.IsVLA)
+ return ExprResult();
+ return R;
}
/// Build an array type.
@@ -2350,68 +2407,95 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
return QualType();
}
+ // VLAs always produce at least a -Wvla diagnostic, sometimes an error.
+ unsigned VLADiag;
+ bool VLAIsError;
+ if (getLangOpts().OpenCL) {
+ // OpenCL v1.2 s6.9.d: variable length arrays are not supported.
+ VLADiag = diag::err_opencl_vla;
+ VLAIsError = true;
+ } else if (getLangOpts().C99) {
+ VLADiag = diag::warn_vla_used;
+ VLAIsError = false;
+ } else if (isSFINAEContext()) {
+ VLADiag = diag::err_vla_in_sfinae;
+ VLAIsError = true;
+ } else {
+ VLADiag = diag::ext_vla;
+ VLAIsError = false;
+ }
+
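Taken together with checkArraySize, the same array bound is first classified and then diagnosed per mode; a rough sketch:

    void f(int n) {
      int a[4 * 2];  // ICE: SizeVal is set, yielding a ConstantArrayType
      int b[n];      // not an ICE: unusable ExprResult, so a VLA is built;
                     // diagnosed as warn_vla_used (C99), ext_vla (other modes),
                     // err_vla_in_sfinae (SFINAE), or err_opencl_vla (OpenCL)
    }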
llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType()));
if (!ArraySize) {
- if (ASM == ArrayType::Star)
+ if (ASM == ArrayType::Star) {
+ Diag(Loc, VLADiag);
+ if (VLAIsError)
+ return QualType();
+
T = Context.getVariableArrayType(T, nullptr, ASM, Quals, Brackets);
- else
+ } else {
T = Context.getIncompleteArrayType(T, ASM, Quals);
+ }
} else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) {
T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets);
- } else if ((!T->isDependentType() && !T->isIncompleteType() &&
- !T->isConstantSizeType()) ||
- isArraySizeVLA(*this, ArraySize, ConstVal)) {
- // Even in C++11, don't allow contextual conversions in the array bound
- // of a VLA.
- if (getLangOpts().CPlusPlus11 &&
- !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
- Diag(ArraySize->getBeginLoc(), diag::err_array_size_non_int)
- << ArraySize->getType() << ArraySize->getSourceRange();
+ } else {
+ ExprResult R =
+ checkArraySize(*this, ArraySize, ConstVal, VLADiag, VLAIsError);
+ if (R.isInvalid())
return QualType();
- }
- // C99: an array with an element type that has a non-constant-size is a VLA.
- // C99: an array with a non-ICE size is a VLA. We accept any expression
- // that we can fold to a non-zero positive value as an extension.
- T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
- } else {
- // C99 6.7.5.2p1: If the expression is a constant expression, it shall
- // have a value greater than zero.
- if (ConstVal.isSigned() && ConstVal.isNegative()) {
- if (Entity)
- Diag(ArraySize->getBeginLoc(), diag::err_decl_negative_array_size)
- << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
- else
- Diag(ArraySize->getBeginLoc(), diag::err_typecheck_negative_array_size)
+ if (!R.isUsable()) {
+        // C99: an array with a non-ICE size is a VLA. As an extension, we
+        // accept any expression that we can fold to a non-zero positive
+        // value as a non-VLA.
+ T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
+ } else if (!T->isDependentType() && !T->isIncompleteType() &&
+ !T->isConstantSizeType()) {
+ // C99: an array with an element type that has a non-constant-size is a
+ // VLA.
+        // FIXME: Add a note to explain why this is a VLA.
+ Diag(Loc, VLADiag);
+ if (VLAIsError)
+ return QualType();
+ T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
+ } else {
+ // C99 6.7.5.2p1: If the expression is a constant expression, it shall
+ // have a value greater than zero.
+ // In C++, this follows from narrowing conversions being disallowed.
+ if (ConstVal.isSigned() && ConstVal.isNegative()) {
+ if (Entity)
+ Diag(ArraySize->getBeginLoc(), diag::err_decl_negative_array_size)
+ << getPrintableNameForEntity(Entity)
+ << ArraySize->getSourceRange();
+ else
+ Diag(ArraySize->getBeginLoc(),
+ diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+ if (ConstVal == 0) {
+        // GCC accepts zero-sized static arrays. We allow them when
+ // we're not in a SFINAE context.
+ Diag(ArraySize->getBeginLoc(),
+ isSFINAEContext() ? diag::err_typecheck_zero_array_size
+ : diag::ext_typecheck_zero_array_size)
<< ArraySize->getSourceRange();
- return QualType();
- }
- if (ConstVal == 0) {
- // GCC accepts zero sized static arrays. We allow them when
- // we're not in a SFINAE context.
- Diag(ArraySize->getBeginLoc(), isSFINAEContext()
- ? diag::err_typecheck_zero_array_size
- : diag::ext_typecheck_zero_array_size)
- << ArraySize->getSourceRange();
- } else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
- !T->isIncompleteType() && !T->isUndeducedType()) {
+ }
+
// Is the array too large?
- unsigned ActiveSizeBits
- = ConstantArrayType::getNumAddressingBits(Context, T, ConstVal);
+ unsigned ActiveSizeBits =
+ (!T->isDependentType() && !T->isVariablyModifiedType() &&
+ !T->isIncompleteType() && !T->isUndeducedType())
+ ? ConstantArrayType::getNumAddressingBits(Context, T, ConstVal)
+ : ConstVal.getActiveBits();
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
Diag(ArraySize->getBeginLoc(), diag::err_array_too_large)
<< ConstVal.toString(10) << ArraySize->getSourceRange();
return QualType();
}
- }
- T = Context.getConstantArrayType(T, ConstVal, ArraySize, ASM, Quals);
- }
-
- // OpenCL v1.2 s6.9.d: variable length arrays are not supported.
- if (getLangOpts().OpenCL && T->isVariableArrayType()) {
- Diag(Loc, diag::err_opencl_vla);
- return QualType();
+ T = Context.getConstantArrayType(T, ConstVal, ArraySize, ASM, Quals);
+ }
}
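Two of the constant-bound checks above in one hedged example (the struct and variable names are illustrative; the sizes assume a typical 64-bit target):

    struct Packet {
      int len;
      char data[0];       // ext_typecheck_zero_array_size (an error in SFINAE contexts)
    };
    int huge[1ULL << 62]; // err_array_too_large: exceeds the maximum addressing bits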
if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) {
@@ -2424,26 +2508,12 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
: CFT_InvalidTarget);
}
- // If this is not C99, extwarn about VLA's and C99 array size modifiers.
- if (!getLangOpts().C99) {
- if (T->isVariableArrayType()) {
- // Prohibit the use of VLAs during template argument deduction.
- if (isSFINAEContext()) {
- Diag(Loc, diag::err_vla_in_sfinae);
- return QualType();
- }
- // Just extwarn about VLAs.
- else
- Diag(Loc, diag::ext_vla);
- } else if (ASM != ArrayType::Normal || Quals != 0)
- Diag(Loc,
- getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx
- : diag::ext_c99_array_usage) << ASM;
- }
-
- if (T->isVariableArrayType()) {
- // Warn about VLAs for -Wvla.
- Diag(Loc, diag::warn_vla_used);
+ // If this is not C99, diagnose array size modifiers on non-VLAs.
+ if (!getLangOpts().C99 && !T->isVariableArrayType() &&
+ (ASM != ArrayType::Normal || Quals != 0)) {
+ Diag(Loc, getLangOpts().CPlusPlus ? diag::err_c99_array_usage_cxx
+ : diag::ext_c99_array_usage)
+ << ASM;
}
// OpenCL v2.0 s6.12.5 - Arrays of blocks are not supported.
@@ -2465,9 +2535,10 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
SourceLocation AttrLoc) {
// The base type must be integer (not Boolean or enumeration) or float, and
// can't already be a vector.
- if (!CurType->isDependentType() &&
- (!CurType->isBuiltinType() || CurType->isBooleanType() ||
- (!CurType->isIntegerType() && !CurType->isRealFloatingType()))) {
+ if ((!CurType->isDependentType() &&
+ (!CurType->isBuiltinType() || CurType->isBooleanType() ||
+ (!CurType->isIntegerType() && !CurType->isRealFloatingType()))) ||
+ CurType->isArrayType()) {
Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << CurType;
return QualType();
}
@@ -2476,8 +2547,8 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
VectorType::GenericVector);
- llvm::APSInt VecSize(32);
- if (!SizeExpr->isIntegerConstantExpr(VecSize, Context)) {
+ Optional<llvm::APSInt> VecSize = SizeExpr->getIntegerConstantExpr(Context);
+ if (!VecSize) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "vector_size" << AANT_ArgumentIntegerConstant
<< SizeExpr->getSourceRange();
@@ -2489,13 +2560,13 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
VectorType::GenericVector);
// vecSize is specified in bytes - convert to bits.
- if (!VecSize.isIntN(61)) {
+ if (!VecSize->isIntN(61)) {
// Bit size will overflow uint64.
Diag(AttrLoc, diag::err_attribute_size_too_large)
<< SizeExpr->getSourceRange() << "vector";
return QualType();
}
- uint64_t VectorSizeBits = VecSize.getZExtValue() * 8;
+ uint64_t VectorSizeBits = VecSize->getZExtValue() * 8;
unsigned TypeSize = static_cast<unsigned>(Context.getTypeSize(CurType));
if (VectorSizeBits == 0) {
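As the surrounding code notes, `vector_size` is given in bytes and converted to bits; a sketch:

    typedef int v4si __attribute__((vector_size(16)));  // 16 bytes / sizeof(int) = 4 lanes
    typedef int v0si __attribute__((vector_size(0)));   // rejected: zero-size vector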
@@ -2540,22 +2611,22 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
}
if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) {
- llvm::APSInt vecSize(32);
- if (!ArraySize->isIntegerConstantExpr(vecSize, Context)) {
+ Optional<llvm::APSInt> vecSize = ArraySize->getIntegerConstantExpr(Context);
+ if (!vecSize) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "ext_vector_type" << AANT_ArgumentIntegerConstant
<< ArraySize->getSourceRange();
return QualType();
}
- if (!vecSize.isIntN(32)) {
+ if (!vecSize->isIntN(32)) {
Diag(AttrLoc, diag::err_attribute_size_too_large)
<< ArraySize->getSourceRange() << "vector";
return QualType();
}
// Unlike gcc's vector_size attribute, the size is specified as the
// number of elements, not the number of bytes.
- unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());
+ unsigned vectorSize = static_cast<unsigned>(vecSize->getZExtValue());
if (vectorSize == 0) {
Diag(AttrLoc, diag::err_attribute_zero_size)
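Unlike `vector_size`, `ext_vector_type` counts elements rather than bytes, per the comment above; a sketch:

    typedef float float4 __attribute__((ext_vector_type(4)));  // 4 floats, not 4 bytes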
@@ -2586,18 +2657,15 @@ QualType Sema::BuildMatrixType(QualType ElementTy, Expr *NumRows, Expr *NumCols,
return Context.getDependentSizedMatrixType(ElementTy, NumRows, NumCols,
AttrLoc);
- // Both row and column values can only be 20 bit wide currently.
- llvm::APSInt ValueRows(32), ValueColumns(32);
-
- bool const RowsIsInteger = NumRows->isIntegerConstantExpr(ValueRows, Context);
- bool const ColumnsIsInteger =
- NumCols->isIntegerConstantExpr(ValueColumns, Context);
+ Optional<llvm::APSInt> ValueRows = NumRows->getIntegerConstantExpr(Context);
+ Optional<llvm::APSInt> ValueColumns =
+ NumCols->getIntegerConstantExpr(Context);
auto const RowRange = NumRows->getSourceRange();
auto const ColRange = NumCols->getSourceRange();
  // Both the row and column expressions are invalid.
- if (!RowsIsInteger && !ColumnsIsInteger) {
+ if (!ValueRows && !ValueColumns) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "matrix_type" << AANT_ArgumentIntegerConstant << RowRange
<< ColRange;
@@ -2605,22 +2673,22 @@ QualType Sema::BuildMatrixType(QualType ElementTy, Expr *NumRows, Expr *NumCols,
}
// Only the row expression is invalid.
- if (!RowsIsInteger) {
+ if (!ValueRows) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "matrix_type" << AANT_ArgumentIntegerConstant << RowRange;
return QualType();
}
// Only the column expression is invalid.
- if (!ColumnsIsInteger) {
+ if (!ValueColumns) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "matrix_type" << AANT_ArgumentIntegerConstant << ColRange;
return QualType();
}
// Check the matrix dimensions.
- unsigned MatrixRows = static_cast<unsigned>(ValueRows.getZExtValue());
- unsigned MatrixColumns = static_cast<unsigned>(ValueColumns.getZExtValue());
+ unsigned MatrixRows = static_cast<unsigned>(ValueRows->getZExtValue());
+ unsigned MatrixColumns = static_cast<unsigned>(ValueColumns->getZExtValue());
if (MatrixRows == 0 && MatrixColumns == 0) {
Diag(AttrLoc, diag::err_attribute_zero_size)
<< "matrix" << RowRange << ColRange;
@@ -3034,12 +3102,12 @@ void Sema::diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
Declarator &D,
unsigned FunctionChunkIndex) {
- if (D.getTypeObject(FunctionChunkIndex).Fun.hasTrailingReturnType()) {
- // FIXME: TypeSourceInfo doesn't preserve location information for
- // qualifiers.
+ const DeclaratorChunk::FunctionTypeInfo &FTI =
+ D.getTypeObject(FunctionChunkIndex).Fun;
+ if (FTI.hasTrailingReturnType()) {
S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
RetTy.getLocalCVRQualifiers(),
- D.getIdentifierLoc());
+ FTI.getTrailingReturnTypeLoc());
return;
}
@@ -3057,11 +3125,11 @@ static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
diag::warn_qual_return_type,
PTI.TypeQuals,
SourceLocation(),
- SourceLocation::getFromRawEncoding(PTI.ConstQualLoc),
- SourceLocation::getFromRawEncoding(PTI.VolatileQualLoc),
- SourceLocation::getFromRawEncoding(PTI.RestrictQualLoc),
- SourceLocation::getFromRawEncoding(PTI.AtomicQualLoc),
- SourceLocation::getFromRawEncoding(PTI.UnalignedQualLoc));
+ PTI.ConstQualLoc,
+ PTI.VolatileQualLoc,
+ PTI.RestrictQualLoc,
+ PTI.AtomicQualLoc,
+ PTI.UnalignedQualLoc);
return;
}
@@ -3101,24 +3169,10 @@ static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
D.getDeclSpec().getUnalignedSpecLoc());
}
-static void CopyTypeConstraintFromAutoType(Sema &SemaRef, const AutoType *Auto,
- AutoTypeLoc AutoLoc,
- TemplateTypeParmDecl *TP,
- SourceLocation EllipsisLoc) {
-
- TemplateArgumentListInfo TAL(AutoLoc.getLAngleLoc(), AutoLoc.getRAngleLoc());
- for (unsigned Idx = 0; Idx < AutoLoc.getNumArgs(); ++Idx)
- TAL.addArgument(AutoLoc.getArgLoc(Idx));
-
- SemaRef.AttachTypeConstraint(
- AutoLoc.getNestedNameSpecifierLoc(), AutoLoc.getConceptNameInfo(),
- AutoLoc.getNamedConcept(),
- AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr, TP, EllipsisLoc);
-}
-
-static QualType InventTemplateParameter(
- TypeProcessingState &state, QualType T, TypeSourceInfo *TSI, AutoType *Auto,
- InventedTemplateParameterInfo &Info) {
+static std::pair<QualType, TypeSourceInfo *>
+InventTemplateParameter(TypeProcessingState &state, QualType T,
+ TypeSourceInfo *TrailingTSI, AutoType *Auto,
+ InventedTemplateParameterInfo &Info) {
Sema &S = state.getSema();
Declarator &D = state.getDeclarator();
@@ -3143,13 +3197,25 @@ static QualType InventTemplateParameter(
IsParameterPack, /*HasTypeConstraint=*/Auto->isConstrained());
InventedTemplateParam->setImplicit();
Info.TemplateParams.push_back(InventedTemplateParam);
- // Attach type constraints
+
+ // Attach type constraints to the new parameter.
if (Auto->isConstrained()) {
- if (TSI) {
- CopyTypeConstraintFromAutoType(
- S, Auto, TSI->getTypeLoc().getContainedAutoTypeLoc(),
- InventedTemplateParam, D.getEllipsisLoc());
+ if (TrailingTSI) {
+ // The 'auto' appears in a trailing return type we've already built;
+ // extract its type constraints to attach to the template parameter.
+ AutoTypeLoc AutoLoc = TrailingTSI->getTypeLoc().getContainedAutoTypeLoc();
+ TemplateArgumentListInfo TAL(AutoLoc.getLAngleLoc(), AutoLoc.getRAngleLoc());
+ for (unsigned Idx = 0; Idx < AutoLoc.getNumArgs(); ++Idx)
+ TAL.addArgument(AutoLoc.getArgLoc(Idx));
+
+ S.AttachTypeConstraint(AutoLoc.getNestedNameSpecifierLoc(),
+ AutoLoc.getConceptNameInfo(),
+ AutoLoc.getNamedConcept(),
+ AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr,
+ InventedTemplateParam, D.getEllipsisLoc());
} else {
+ // The 'auto' appears in the decl-specifiers; we've not finished forming
+ // TypeSourceInfo for it yet.
TemplateIdAnnotation *TemplateId = D.getDeclSpec().getRepAsTemplateId();
TemplateArgumentListInfo TemplateArgsInfo;
if (TemplateId->LAngleLoc.isValid()) {
@@ -3167,15 +3233,16 @@ static QualType InventTemplateParameter(
}
}
- // If TSI is nullptr, this is a constrained declspec auto and the type
- // constraint will be attached later in TypeSpecLocFiller
-
// Replace the 'auto' in the function parameter with this invented
// template type parameter.
// FIXME: Retain some type sugar to indicate that this was written
// as 'auto'?
- return state.ReplaceAutoType(
- T, QualType(InventedTemplateParam->getTypeForDecl(), 0));
+ QualType Replacement(InventedTemplateParam->getTypeForDecl(), 0);
+ QualType NewT = state.ReplaceAutoType(T, Replacement);
+ TypeSourceInfo *NewTSI =
+ TrailingTSI ? S.ReplaceAutoTypeSourceInfo(TrailingTSI, Replacement)
+ : nullptr;
+ return {NewT, NewTSI};
}
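Conceptually, the reworked function rewrites the type (and, for trailing return types, its TypeSourceInfo) to use the invented parameter; for a constrained placeholder the effect is roughly this (the concept is illustrative):

    template <typename T> concept Small = sizeof(T) <= 8;
    void f(Small auto x);
    // behaves approximately like:
    template <Small T> void f(T x);  // T is the invented template parameter,
                                     // with the type constraint attached to it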
static TypeSourceInfo *
@@ -3234,8 +3301,19 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
if (!D.getAttributes().empty())
distributeTypeAttrsFromDeclarator(state, T);
+ // Find the deduced type in this type. Look in the trailing return type if we
+ // have one, otherwise in the DeclSpec type.
+ // FIXME: The standard wording doesn't currently describe this.
+ DeducedType *Deduced = T->getContainedDeducedType();
+ bool DeducedIsTrailingReturnType = false;
+ if (Deduced && isa<AutoType>(Deduced) && D.hasTrailingReturnType()) {
+ QualType T = SemaRef.GetTypeFromParser(D.getTrailingReturnType());
+ Deduced = T.isNull() ? nullptr : T->getContainedDeducedType();
+ DeducedIsTrailingReturnType = true;
+ }
+
// C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
- if (DeducedType *Deduced = T->getContainedDeducedType()) {
+ if (Deduced) {
AutoType *Auto = dyn_cast<AutoType>(Deduced);
int Error = -1;
@@ -3246,21 +3324,21 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
bool IsDeducedReturnType = false;
switch (D.getContext()) {
- case DeclaratorContext::LambdaExprContext:
+ case DeclaratorContext::LambdaExpr:
// Declared return type of a lambda-declarator is implicit and is always
// 'auto'.
break;
- case DeclaratorContext::ObjCParameterContext:
- case DeclaratorContext::ObjCResultContext:
+ case DeclaratorContext::ObjCParameter:
+ case DeclaratorContext::ObjCResult:
Error = 0;
break;
- case DeclaratorContext::RequiresExprContext:
+ case DeclaratorContext::RequiresExpr:
Error = 22;
break;
- case DeclaratorContext::PrototypeContext:
- case DeclaratorContext::LambdaExprParameterContext: {
+ case DeclaratorContext::Prototype:
+ case DeclaratorContext::LambdaExprParameter: {
InventedTemplateParameterInfo *Info = nullptr;
- if (D.getContext() == DeclaratorContext::PrototypeContext) {
+ if (D.getContext() == DeclaratorContext::Prototype) {
// With concepts we allow 'auto' in function parameters.
if (!SemaRef.getLangOpts().CPlusPlus20 || !Auto ||
Auto->getKeyword() != AutoTypeKeyword::Auto) {
@@ -3269,10 +3347,6 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
} else if (!SemaRef.getCurScope()->isFunctionDeclarationScope()) {
Error = 21;
break;
- } else if (D.hasTrailingReturnType()) {
- // This might be OK, but we'll need to convert the trailing return
- // type later.
- break;
}
Info = &SemaRef.InventedParameterInfos.back();
@@ -3286,10 +3360,15 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Info = SemaRef.getCurLambda();
assert(Info && "No LambdaScopeInfo on the stack!");
}
- T = InventTemplateParameter(state, T, nullptr, Auto, *Info);
+
+ // We'll deal with inventing template parameters for 'auto' in trailing
+ // return types when we pick up the trailing return type when processing
+ // the function chunk.
+ if (!DeducedIsTrailingReturnType)
+ T = InventTemplateParameter(state, T, nullptr, Auto, *Info).first;
break;
}
- case DeclaratorContext::MemberContext: {
+ case DeclaratorContext::Member: {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
D.isFunctionDeclarator())
break;
@@ -3309,20 +3388,21 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Error = 20; // Friend type
break;
}
- case DeclaratorContext::CXXCatchContext:
- case DeclaratorContext::ObjCCatchContext:
+ case DeclaratorContext::CXXCatch:
+ case DeclaratorContext::ObjCCatch:
Error = 7; // Exception declaration
break;
- case DeclaratorContext::TemplateParamContext:
- if (isa<DeducedTemplateSpecializationType>(Deduced))
- Error = 19; // Template parameter
+ case DeclaratorContext::TemplateParam:
+ if (isa<DeducedTemplateSpecializationType>(Deduced) &&
+ !SemaRef.getLangOpts().CPlusPlus20)
+ Error = 19; // Template parameter (until C++20)
else if (!SemaRef.getLangOpts().CPlusPlus17)
Error = 8; // Template parameter (until C++17)
break;
- case DeclaratorContext::BlockLiteralContext:
+ case DeclaratorContext::BlockLiteral:
Error = 9; // Block literal
break;
- case DeclaratorContext::TemplateArgContext:
+ case DeclaratorContext::TemplateArg:
// Within a template argument list, a deduced template specialization
// type will be reinterpreted as a template template argument.
if (isa<DeducedTemplateSpecializationType>(Deduced) &&
@@ -3330,47 +3410,47 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
D.getDeclSpec().getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier)
break;
LLVM_FALLTHROUGH;
- case DeclaratorContext::TemplateTypeArgContext:
+ case DeclaratorContext::TemplateTypeArg:
Error = 10; // Template type argument
break;
- case DeclaratorContext::AliasDeclContext:
- case DeclaratorContext::AliasTemplateContext:
+ case DeclaratorContext::AliasDecl:
+ case DeclaratorContext::AliasTemplate:
Error = 12; // Type alias
break;
- case DeclaratorContext::TrailingReturnContext:
- case DeclaratorContext::TrailingReturnVarContext:
+ case DeclaratorContext::TrailingReturn:
+ case DeclaratorContext::TrailingReturnVar:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 13; // Function return type
IsDeducedReturnType = true;
break;
- case DeclaratorContext::ConversionIdContext:
+ case DeclaratorContext::ConversionId:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 14; // conversion-type-id
IsDeducedReturnType = true;
break;
- case DeclaratorContext::FunctionalCastContext:
+ case DeclaratorContext::FunctionalCast:
if (isa<DeducedTemplateSpecializationType>(Deduced))
break;
LLVM_FALLTHROUGH;
- case DeclaratorContext::TypeNameContext:
+ case DeclaratorContext::TypeName:
Error = 15; // Generic
break;
- case DeclaratorContext::FileContext:
- case DeclaratorContext::BlockContext:
- case DeclaratorContext::ForContext:
- case DeclaratorContext::InitStmtContext:
- case DeclaratorContext::ConditionContext:
+ case DeclaratorContext::File:
+ case DeclaratorContext::Block:
+ case DeclaratorContext::ForInit:
+ case DeclaratorContext::SelectionInit:
+ case DeclaratorContext::Condition:
// FIXME: P0091R3 (erroneously) does not permit class template argument
// deduction in conditions, for-init-statements, and other declarations
// that are not simple-declarations.
break;
- case DeclaratorContext::CXXNewContext:
+ case DeclaratorContext::CXXNew:
// FIXME: P0091R3 does not permit class template argument deduction here,
// but we follow GCC and allow it anyway.
if (!IsCXXAutoType && !isa<DeducedTemplateSpecializationType>(Deduced))
Error = 17; // 'new' type
break;
- case DeclaratorContext::KNRTypeListContext:
+ case DeclaratorContext::KNRTypeList:
Error = 18; // K&R function parameter
break;
}
@@ -3384,20 +3464,6 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
(!SemaRef.getLangOpts().CPlusPlus11 || !IsCXXAutoType))
Error = 13;
- bool HaveTrailing = false;
-
- // C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator
- // contains a trailing return type. That is only legal at the outermost
- // level. Check all declarator chunks (outermost first) anyway, to give
- // better diagnostics.
- // We don't support '__auto_type' with trailing return types.
- // FIXME: Should we only do this for 'auto' and not 'decltype(auto)'?
- if (SemaRef.getLangOpts().CPlusPlus11 && IsCXXAutoType &&
- D.hasTrailingReturnType()) {
- HaveTrailing = true;
- Error = -1;
- }
-
SourceRange AutoRange = D.getDeclSpec().getTypeSpecTypeLoc();
if (D.getName().getKind() == UnqualifiedIdKind::IK_ConversionFunctionId)
AutoRange = D.getName().getSourceRange();
@@ -3427,17 +3493,15 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
T = SemaRef.Context.IntTy;
D.setInvalidType(true);
- } else if (Auto && !HaveTrailing &&
- D.getContext() != DeclaratorContext::LambdaExprContext) {
+ } else if (Auto && D.getContext() != DeclaratorContext::LambdaExpr) {
// If there was a trailing return type, we already got
// warn_cxx98_compat_trailing_return_type in the parser.
SemaRef.Diag(AutoRange.getBegin(),
- D.getContext() ==
- DeclaratorContext::LambdaExprParameterContext
+ D.getContext() == DeclaratorContext::LambdaExprParameter
? diag::warn_cxx11_compat_generic_lambda
- : IsDeducedReturnType
- ? diag::warn_cxx11_compat_deduced_return_type
- : diag::warn_cxx98_compat_auto_type_specifier)
+ : IsDeducedReturnType
+ ? diag::warn_cxx11_compat_deduced_return_type
+ : diag::warn_cxx98_compat_auto_type_specifier)
<< AutoRange;
}
}
@@ -3448,50 +3512,50 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
// or enumeration in a type-specifier-seq.
unsigned DiagID = 0;
switch (D.getContext()) {
- case DeclaratorContext::TrailingReturnContext:
- case DeclaratorContext::TrailingReturnVarContext:
+ case DeclaratorContext::TrailingReturn:
+ case DeclaratorContext::TrailingReturnVar:
// Class and enumeration definitions are syntactically not allowed in
// trailing return types.
llvm_unreachable("parser should not have allowed this");
break;
- case DeclaratorContext::FileContext:
- case DeclaratorContext::MemberContext:
- case DeclaratorContext::BlockContext:
- case DeclaratorContext::ForContext:
- case DeclaratorContext::InitStmtContext:
- case DeclaratorContext::BlockLiteralContext:
- case DeclaratorContext::LambdaExprContext:
+ case DeclaratorContext::File:
+ case DeclaratorContext::Member:
+ case DeclaratorContext::Block:
+ case DeclaratorContext::ForInit:
+ case DeclaratorContext::SelectionInit:
+ case DeclaratorContext::BlockLiteral:
+ case DeclaratorContext::LambdaExpr:
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration unless
// it appears in the type-id of an alias-declaration (7.1.3) that is not
// the declaration of a template-declaration.
- case DeclaratorContext::AliasDeclContext:
+ case DeclaratorContext::AliasDecl:
break;
- case DeclaratorContext::AliasTemplateContext:
+ case DeclaratorContext::AliasTemplate:
DiagID = diag::err_type_defined_in_alias_template;
break;
- case DeclaratorContext::TypeNameContext:
- case DeclaratorContext::FunctionalCastContext:
- case DeclaratorContext::ConversionIdContext:
- case DeclaratorContext::TemplateParamContext:
- case DeclaratorContext::CXXNewContext:
- case DeclaratorContext::CXXCatchContext:
- case DeclaratorContext::ObjCCatchContext:
- case DeclaratorContext::TemplateArgContext:
- case DeclaratorContext::TemplateTypeArgContext:
+ case DeclaratorContext::TypeName:
+ case DeclaratorContext::FunctionalCast:
+ case DeclaratorContext::ConversionId:
+ case DeclaratorContext::TemplateParam:
+ case DeclaratorContext::CXXNew:
+ case DeclaratorContext::CXXCatch:
+ case DeclaratorContext::ObjCCatch:
+ case DeclaratorContext::TemplateArg:
+ case DeclaratorContext::TemplateTypeArg:
DiagID = diag::err_type_defined_in_type_specifier;
break;
- case DeclaratorContext::PrototypeContext:
- case DeclaratorContext::LambdaExprParameterContext:
- case DeclaratorContext::ObjCParameterContext:
- case DeclaratorContext::ObjCResultContext:
- case DeclaratorContext::KNRTypeListContext:
- case DeclaratorContext::RequiresExprContext:
+ case DeclaratorContext::Prototype:
+ case DeclaratorContext::LambdaExprParameter:
+ case DeclaratorContext::ObjCParameter:
+ case DeclaratorContext::ObjCResult:
+ case DeclaratorContext::KNRTypeList:
+ case DeclaratorContext::RequiresExpr:
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
DiagID = diag::err_type_defined_in_param_type;
break;
- case DeclaratorContext::ConditionContext:
+ case DeclaratorContext::Condition:
// C++ 6.4p2:
// The type-specifier-seq shall not contain typedef and shall not declare
// a new class or enumeration.
@@ -3532,15 +3596,14 @@ static void warnAboutAmbiguousFunction(Sema &S, Declarator &D,
// Only warn if this declarator is declaring a function at block scope, and
// doesn't have a storage class (such as 'extern') specified.
if (!D.isFunctionDeclarator() ||
- D.getFunctionDefinitionKind() != FDK_Declaration ||
+ D.getFunctionDefinitionKind() != FunctionDefinitionKind::Declaration ||
!S.CurContext->isFunctionOrMethod() ||
- D.getDeclSpec().getStorageClassSpec()
- != DeclSpec::SCS_unspecified)
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_unspecified)
return;
// Inside a condition, a direct initializer is not permitted. We allow one to
// be parsed in order to give better diagnostics in condition parsing.
- if (D.getContext() == DeclaratorContext::ConditionContext)
+ if (D.getContext() == DeclaratorContext::Condition)
return;
SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc);
@@ -3656,7 +3719,7 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
case DeclaratorChunk::Function:
// In a new-type-id, function chunks require parentheses.
- if (D.getContext() == DeclaratorContext::CXXNewContext)
+ if (D.getContext() == DeclaratorContext::CXXNew)
return;
// FIXME: "A(f())" deserves a vexing-parse warning, not just a
// redundant-parens warning, but we don't know whether the function
@@ -3773,7 +3836,7 @@ static CallingConv getCCForDeclaratorChunk(
// in a member pointer.
IsCXXInstanceMethod =
D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer;
- } else if (D.getContext() == DeclaratorContext::LambdaExprContext) {
+ } else if (D.getContext() == DeclaratorContext::LambdaExpr) {
// This can only be a call operator for a lambda, which is an instance
// method.
IsCXXInstanceMethod = true;
@@ -3832,6 +3895,11 @@ IdentifierInfo *Sema::getNullabilityKeyword(NullabilityKind nullability) {
Ident__Nullable = PP.getIdentifierInfo("_Nullable");
return Ident__Nullable;
+ case NullabilityKind::NullableResult:
+ if (!Ident__Nullable_result)
+ Ident__Nullable_result = PP.getIdentifierInfo("_Nullable_result");
+ return Ident__Nullable_result;
+
case NullabilityKind::Unspecified:
if (!Ident__Null_unspecified)
Ident__Null_unspecified = PP.getIdentifierInfo("_Null_unspecified");
@@ -3854,6 +3922,7 @@ static bool hasNullabilityAttr(const ParsedAttributesView &attrs) {
for (const ParsedAttr &AL : attrs) {
if (AL.getKind() == ParsedAttr::AT_TypeNonNull ||
AL.getKind() == ParsedAttr::AT_TypeNullable ||
+ AL.getKind() == ParsedAttr::AT_TypeNullableResult ||
AL.getKind() == ParsedAttr::AT_TypeNullUnspecified)
return true;
}
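The check now recognizes four nullability spellings; the three pre-existing ones are shown below, and the newly added `_Nullable_result` follows the same pattern (it is aimed at Objective-C completion-handler results):

    int *_Nonnull p;           // ParsedAttr::AT_TypeNonNull
    int *_Nullable q;          // ParsedAttr::AT_TypeNullable
    int *_Null_unspecified r;  // ParsedAttr::AT_TypeNullUnspecified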
@@ -3998,32 +4067,9 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
if (auto recordType = type->getAs<RecordType>()) {
RecordDecl *recordDecl = recordType->getDecl();
- bool isCFError = false;
- if (S.CFError) {
- // If we already know about CFError, test it directly.
- isCFError = (S.CFError == recordDecl);
- } else {
- // Check whether this is CFError, which we identify based on its bridge
- // to NSError. CFErrorRef used to be declared with "objc_bridge" but is
- // now declared with "objc_bridge_mutable", so look for either one of
- // the two attributes.
- if (recordDecl->getTagKind() == TTK_Struct && numNormalPointers > 0) {
- IdentifierInfo *bridgedType = nullptr;
- if (auto bridgeAttr = recordDecl->getAttr<ObjCBridgeAttr>())
- bridgedType = bridgeAttr->getBridgedType();
- else if (auto bridgeAttr =
- recordDecl->getAttr<ObjCBridgeMutableAttr>())
- bridgedType = bridgeAttr->getBridgedType();
-
- if (bridgedType == S.getNSErrorIdent()) {
- S.CFError = recordDecl;
- isCFError = true;
- }
- }
- }
-
// If this is CFErrorRef*, report it as such.
- if (isCFError && numNormalPointers == 2 && numTypeSpecifierPointers < 2) {
+ if (numNormalPointers == 2 && numTypeSpecifierPointers < 2 &&
+ S.isCFError(recordDecl)) {
return PointerDeclaratorKind::CFErrorRefPointer;
}
break;
@@ -4047,6 +4093,31 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
}
}
+bool Sema::isCFError(RecordDecl *RD) {
+ // If we already know about CFError, test it directly.
+ if (CFError)
+ return CFError == RD;
+
+ // Check whether this is CFError, which we identify based on its bridge to
+ // NSError. CFErrorRef used to be declared with "objc_bridge" but is now
+ // declared with "objc_bridge_mutable", so look for either one of the two
+ // attributes.
+ if (RD->getTagKind() == TTK_Struct) {
+ IdentifierInfo *bridgedType = nullptr;
+ if (auto bridgeAttr = RD->getAttr<ObjCBridgeAttr>())
+ bridgedType = bridgeAttr->getBridgedType();
+ else if (auto bridgeAttr = RD->getAttr<ObjCBridgeMutableAttr>())
+ bridgedType = bridgeAttr->getBridgedType();
+
+ if (bridgedType == getNSErrorIdent()) {
+ CFError = RD;
+ return true;
+ }
+ }
+
+ return false;
+}
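A hedged sketch of the declaration shape the new Sema::isCFError recognizes (the real CoreFoundation headers spell the attribute via the CF_BRIDGED_TYPE macro, and either objc_bridge or objc_bridge_mutable is accepted):

    typedef struct __attribute__((objc_bridge(NSError))) __CFError *CFErrorRef;
    // A 'CFErrorRef *' out-parameter (two pointer levels, at most one written in
    // the type specifier) then classifies as PointerDeclaratorKind::CFErrorRefPointer.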
+
static FileID getNullabilityCompletenessCheckFileID(Sema &S,
SourceLocation loc) {
// If we're anywhere in a function, method, or closure context, don't perform
@@ -4086,7 +4157,8 @@ static FileID getNullabilityCompletenessCheckFileID(Sema &S,
/// Creates a fix-it to insert a C-style nullability keyword at \p pointerLoc,
/// taking into account whitespace before and after.
-static void fixItNullability(Sema &S, DiagnosticBuilder &Diag,
+template <typename DiagBuilderT>
+static void fixItNullability(Sema &S, DiagBuilderT &Diag,
SourceLocation PointerLoc,
NullabilityKind Nullability) {
assert(PointerLoc.isValid());
@@ -4269,6 +4341,9 @@ static Attr *createNullabilityAttr(ASTContext &Ctx, ParsedAttr &Attr,
case NullabilityKind::Nullable:
return createSimpleAttr<TypeNullableAttr>(Ctx, Attr);
+ case NullabilityKind::NullableResult:
+ return createSimpleAttr<TypeNullableResultAttr>(Ctx, Attr);
+
case NullabilityKind::Unspecified:
return createSimpleAttr<TypeNullUnspecifiedAttr>(Ctx, Attr);
}
@@ -4312,9 +4387,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Does this declaration declare a typedef-name?
bool IsTypedefName =
- D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef ||
- D.getContext() == DeclaratorContext::AliasDeclContext ||
- D.getContext() == DeclaratorContext::AliasTemplateContext;
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef ||
+ D.getContext() == DeclaratorContext::AliasDecl ||
+ D.getContext() == DeclaratorContext::AliasTemplate;
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
bool IsQualifiedFunction = T->isFunctionProtoType() &&
@@ -4443,15 +4518,15 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
} else {
bool isFunctionOrMethod = false;
switch (auto context = state.getDeclarator().getContext()) {
- case DeclaratorContext::ObjCParameterContext:
- case DeclaratorContext::ObjCResultContext:
- case DeclaratorContext::PrototypeContext:
- case DeclaratorContext::TrailingReturnContext:
- case DeclaratorContext::TrailingReturnVarContext:
+ case DeclaratorContext::ObjCParameter:
+ case DeclaratorContext::ObjCResult:
+ case DeclaratorContext::Prototype:
+ case DeclaratorContext::TrailingReturn:
+ case DeclaratorContext::TrailingReturnVar:
isFunctionOrMethod = true;
LLVM_FALLTHROUGH;
- case DeclaratorContext::MemberContext:
+ case DeclaratorContext::Member:
if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) {
complainAboutMissingNullability = CAMN_No;
break;
@@ -4465,8 +4540,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
LLVM_FALLTHROUGH;
- case DeclaratorContext::FileContext:
- case DeclaratorContext::KNRTypeListContext: {
+ case DeclaratorContext::File:
+ case DeclaratorContext::KNRTypeList: {
complainAboutMissingNullability = CAMN_Yes;
// Nullability inference depends on the type and declarator.
@@ -4482,9 +4557,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (inAssumeNonNullRegion) {
complainAboutInferringWithinChunk = wrappingKind;
inferNullability = NullabilityKind::NonNull;
- inferNullabilityCS =
- (context == DeclaratorContext::ObjCParameterContext ||
- context == DeclaratorContext::ObjCResultContext);
+ inferNullabilityCS = (context == DeclaratorContext::ObjCParameter ||
+ context == DeclaratorContext::ObjCResult);
}
break;
@@ -4520,28 +4594,28 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
}
- case DeclaratorContext::ConversionIdContext:
+ case DeclaratorContext::ConversionId:
complainAboutMissingNullability = CAMN_Yes;
break;
- case DeclaratorContext::AliasDeclContext:
- case DeclaratorContext::AliasTemplateContext:
- case DeclaratorContext::BlockContext:
- case DeclaratorContext::BlockLiteralContext:
- case DeclaratorContext::ConditionContext:
- case DeclaratorContext::CXXCatchContext:
- case DeclaratorContext::CXXNewContext:
- case DeclaratorContext::ForContext:
- case DeclaratorContext::InitStmtContext:
- case DeclaratorContext::LambdaExprContext:
- case DeclaratorContext::LambdaExprParameterContext:
- case DeclaratorContext::ObjCCatchContext:
- case DeclaratorContext::TemplateParamContext:
- case DeclaratorContext::TemplateArgContext:
- case DeclaratorContext::TemplateTypeArgContext:
- case DeclaratorContext::TypeNameContext:
- case DeclaratorContext::FunctionalCastContext:
- case DeclaratorContext::RequiresExprContext:
+ case DeclaratorContext::AliasDecl:
+ case DeclaratorContext::AliasTemplate:
+ case DeclaratorContext::Block:
+ case DeclaratorContext::BlockLiteral:
+ case DeclaratorContext::Condition:
+ case DeclaratorContext::CXXCatch:
+ case DeclaratorContext::CXXNew:
+ case DeclaratorContext::ForInit:
+ case DeclaratorContext::SelectionInit:
+ case DeclaratorContext::LambdaExpr:
+ case DeclaratorContext::LambdaExprParameter:
+ case DeclaratorContext::ObjCCatch:
+ case DeclaratorContext::TemplateParam:
+ case DeclaratorContext::TemplateArg:
+ case DeclaratorContext::TemplateTypeArg:
+ case DeclaratorContext::TypeName:
+ case DeclaratorContext::FunctionalCast:
+ case DeclaratorContext::RequiresExpr:
// Don't infer in these contexts.
break;
}
@@ -4778,7 +4852,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// array type, ...
if (ASM == ArrayType::Static || ATI.TypeQuals) {
if (!(D.isPrototypeContext() ||
- D.getContext() == DeclaratorContext::KNRTypeListContext)) {
+ D.getContext() == DeclaratorContext::KNRTypeList)) {
S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) <<
(ASM == ArrayType::Static ? "'static'" : "type qualifier");
// Remove the 'static' and the type qualifiers.
@@ -4802,12 +4876,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
const AutoType *AT = T->getContainedAutoType();
// Allow arrays of auto if we are a generic lambda parameter.
// i.e. [](auto (&array)[5]) { return array[0]; }; OK
- if (AT &&
- D.getContext() != DeclaratorContext::LambdaExprParameterContext) {
+ if (AT && D.getContext() != DeclaratorContext::LambdaExprParameter) {
// We've already diagnosed this for decltype(auto).
if (!AT->isDecltypeAuto())
S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto)
- << getPrintableNameForEntity(Name) << T;
+ << getPrintableNameForEntity(Name) << T;
T = QualType();
break;
}
@@ -4866,7 +4939,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
<< D.getSourceRange();
D.setInvalidType(true);
}
- } else if (D.getContext() != DeclaratorContext::LambdaExprContext &&
+ } else if (D.getContext() != DeclaratorContext::LambdaExpr &&
(T.hasQualifiers() || !isa<AutoType>(T) ||
cast<AutoType>(T)->getKeyword() !=
AutoTypeKeyword::Auto ||
@@ -4881,12 +4954,20 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// An error occurred parsing the trailing return type.
T = Context.IntTy;
D.setInvalidType(true);
- } else if (S.getLangOpts().CPlusPlus20)
- // Handle cases like: `auto f() -> auto` or `auto f() -> C auto`.
- if (AutoType *Auto = T->getContainedAutoType())
- if (S.getCurScope()->isFunctionDeclarationScope())
- T = InventTemplateParameter(state, T, TInfo, Auto,
- S.InventedParameterInfos.back());
+ } else if (AutoType *Auto = T->getContainedAutoType()) {
+ // If the trailing return type contains an `auto`, we may need to
+ // invent a template parameter for it, for cases like
+        // `auto f() -> C auto` or `[](auto (*p)() -> auto) {}`.
+ InventedTemplateParameterInfo *InventedParamInfo = nullptr;
+ if (D.getContext() == DeclaratorContext::Prototype)
+ InventedParamInfo = &S.InventedParameterInfos.back();
+ else if (D.getContext() == DeclaratorContext::LambdaExprParameter)
+ InventedParamInfo = S.getCurLambda();
+ if (InventedParamInfo) {
+ std::tie(T, TInfo) = InventTemplateParameter(
+ state, T, TInfo, Auto, *InventedParamInfo);
+ }
+ }
} else {
// This function type is not the type of the entity being declared,
// so checking the 'auto' is not the responsibility of this chunk.
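Mirroring the cases named in the comment above, a sketch of the declarations this path now handles (assuming a concept C; C++20):

    template <typename T> concept C = true;
    auto f() -> C auto { return 0; }  // 'auto' in the function's trailing return type
    void g(auto (*p)() -> auto);      // 'auto' in a parameter's trailing return type;
                                      // a template parameter is invented for it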
@@ -4902,7 +4983,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Last processing chunk in block context means this function chunk
// represents the block.
if (chunkIndex == 0 &&
- D.getContext() == DeclaratorContext::BlockLiteralContext)
+ D.getContext() == DeclaratorContext::BlockLiteral)
diagID = diag::err_block_returning_array_function;
S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
T = Context.IntTy;
@@ -4938,6 +5019,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// (s6.9.e and s6.12.5 OpenCL v2.0) except for printf.
      // We also allow any toolchain-reserved identifiers here.
if (FTI.isVariadic &&
+ !S.getOpenCLOptions().isEnabled("__cl_clang_variadic_functions") &&
!(D.getIdentifier() &&
((D.getIdentifier()->getName() == "printf" &&
(LangOpts.OpenCLCPlusPlus || LangOpts.OpenCLVersion >= 120)) ||
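The added check makes variadic prototypes conditionally legal in OpenCL via the named Clang extension; a sketch, assuming the usual extension pragma spelling:

    #pragma OPENCL EXTENSION __cl_clang_variadic_functions : enable
    void log_it(const char *fmt, ...);  // otherwise diagnosed, with printf
                                        // (OpenCL 1.2+) and toolchain-reserved
                                        // identifiers exempted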
@@ -4980,7 +5062,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
!(S.getLangOpts().CPlusPlus &&
(T->isDependentType() || T->isRecordType()))) {
if (T->isVoidType() && !S.getLangOpts().CPlusPlus &&
- D.getFunctionDefinitionKind() == FDK_Definition) {
+ D.getFunctionDefinitionKind() ==
+ FunctionDefinitionKind::Definition) {
// [6.9.1/3] qualified void return is invalid on a C
// function definition. Apparently ok on declarations and
// in C++ though (!)
@@ -5043,8 +5126,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (IsTypedefName && FTI.getExceptionSpecType() && !LangOpts.CPlusPlus17)
S.Diag(FTI.getExceptionSpecLocBeg(),
diag::err_exception_spec_in_typedef)
- << (D.getContext() == DeclaratorContext::AliasDeclContext ||
- D.getContext() == DeclaratorContext::AliasTemplateContext);
+ << (D.getContext() == DeclaratorContext::AliasDecl ||
+ D.getContext() == DeclaratorContext::AliasTemplate);
// If we see "T var();" or "T var(T());" at block scope, it is probably
// an attempt to initialize a variable, not a function declaration.
@@ -5111,7 +5194,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// is an incomplete type (C99 6.2.5p19) and function decls cannot
// have parameters of incomplete type.
if (FTI.NumParams != 1 || FTI.isVariadic) {
- S.Diag(DeclType.Loc, diag::err_void_only_param);
+ S.Diag(FTI.Params[i].IdentLoc, diag::err_void_only_param);
ParamTy = Context.IntTy;
Param->setType(ParamTy);
} else if (FTI.Params[i].Ident) {
@@ -5225,9 +5308,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
.getScopeRep()
->getKind() == NestedNameSpecifier::TypeSpec) ||
state.getDeclarator().getContext() ==
- DeclaratorContext::MemberContext ||
+ DeclaratorContext::Member ||
state.getDeclarator().getContext() ==
- DeclaratorContext::LambdaExprContext;
+ DeclaratorContext::LambdaExpr;
};
if (state.getSema().getLangOpts().OpenCLCPlusPlus && IsClassMember()) {
@@ -5351,7 +5434,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// The empty list in a function declarator that is not part of a definition
// of that function specifies that no information about the number or types
// of the parameters is supplied.
- if (!LangOpts.CPlusPlus && D.getFunctionDefinitionKind() == FDK_Declaration) {
+ if (!LangOpts.CPlusPlus &&
+ D.getFunctionDefinitionKind() == FunctionDefinitionKind::Declaration) {
bool IsBlock = false;
for (const DeclaratorChunk &DeclType : D.type_objects()) {
switch (DeclType.Kind) {
@@ -5394,8 +5478,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (D.getName().getKind() == UnqualifiedIdKind::IK_DeductionGuideName)
Kind = DeductionGuide;
else if (!D.getCXXScopeSpec().isSet()) {
- if ((D.getContext() == DeclaratorContext::MemberContext ||
- D.getContext() == DeclaratorContext::LambdaExprContext) &&
+ if ((D.getContext() == DeclaratorContext::Member ||
+ D.getContext() == DeclaratorContext::LambdaExpr) &&
!D.getDeclSpec().isFriendSpecified())
Kind = Member;
} else {
@@ -5423,9 +5507,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (IsQualifiedFunction &&
!(Kind == Member &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
- !IsTypedefName &&
- D.getContext() != DeclaratorContext::TemplateArgContext &&
- D.getContext() != DeclaratorContext::TemplateTypeArgContext) {
+ !IsTypedefName && D.getContext() != DeclaratorContext::TemplateArg &&
+ D.getContext() != DeclaratorContext::TemplateTypeArg) {
SourceLocation Loc = D.getBeginLoc();
SourceRange RemovalRange;
unsigned I;
@@ -5480,15 +5563,15 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++0x [dcl.constexpr]p9:
// A constexpr specifier used in an object declaration declares the object
// as const.
- if (D.getDeclSpec().getConstexprSpecifier() == CSK_constexpr &&
+ if (D.getDeclSpec().getConstexprSpecifier() == ConstexprSpecKind::Constexpr &&
T->isObjectType())
T.addConst();
// C++2a [dcl.fct]p4:
// A parameter with volatile-qualified type is deprecated
if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus20 &&
- (D.getContext() == DeclaratorContext::PrototypeContext ||
- D.getContext() == DeclaratorContext::LambdaExprParameterContext))
+ (D.getContext() == DeclaratorContext::Prototype ||
+ D.getContext() == DeclaratorContext::LambdaExprParameter))
S.Diag(D.getIdentifierLoc(), diag::warn_deprecated_volatile_param) << T;
// If there was an ellipsis in the declarator, the declaration declares a
@@ -5499,9 +5582,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// only be used in a parameter-declaration. Such a parameter-declaration
// is a parameter pack (14.5.3). [...]
switch (D.getContext()) {
- case DeclaratorContext::PrototypeContext:
- case DeclaratorContext::LambdaExprParameterContext:
- case DeclaratorContext::RequiresExprContext:
+ case DeclaratorContext::Prototype:
+ case DeclaratorContext::LambdaExprParameter:
+ case DeclaratorContext::RequiresExpr:
// C++0x [dcl.fct]p13:
// [...] When it is part of a parameter-declaration-clause, the
// parameter pack is a function parameter pack (14.5.3). The type T
@@ -5518,10 +5601,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
<< T << D.getSourceRange();
D.setEllipsisLoc(SourceLocation());
} else {
- T = Context.getPackExpansionType(T, None);
+ T = Context.getPackExpansionType(T, None, /*ExpectPackInType=*/false);
}
break;
- case DeclaratorContext::TemplateParamContext:
+ case DeclaratorContext::TemplateParam:
// C++0x [temp.param]p15:
// If a template-parameter is a [...] is a parameter-declaration that
// declares a parameter pack (8.3.5), then the template-parameter is a
@@ -5539,31 +5622,29 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
: diag::ext_variadic_templates);
break;
- case DeclaratorContext::FileContext:
- case DeclaratorContext::KNRTypeListContext:
- case DeclaratorContext::ObjCParameterContext: // FIXME: special diagnostic
- // here?
- case DeclaratorContext::ObjCResultContext: // FIXME: special diagnostic
- // here?
- case DeclaratorContext::TypeNameContext:
- case DeclaratorContext::FunctionalCastContext:
- case DeclaratorContext::CXXNewContext:
- case DeclaratorContext::AliasDeclContext:
- case DeclaratorContext::AliasTemplateContext:
- case DeclaratorContext::MemberContext:
- case DeclaratorContext::BlockContext:
- case DeclaratorContext::ForContext:
- case DeclaratorContext::InitStmtContext:
- case DeclaratorContext::ConditionContext:
- case DeclaratorContext::CXXCatchContext:
- case DeclaratorContext::ObjCCatchContext:
- case DeclaratorContext::BlockLiteralContext:
- case DeclaratorContext::LambdaExprContext:
- case DeclaratorContext::ConversionIdContext:
- case DeclaratorContext::TrailingReturnContext:
- case DeclaratorContext::TrailingReturnVarContext:
- case DeclaratorContext::TemplateArgContext:
- case DeclaratorContext::TemplateTypeArgContext:
+ case DeclaratorContext::File:
+ case DeclaratorContext::KNRTypeList:
+ case DeclaratorContext::ObjCParameter: // FIXME: special diagnostic here?
+ case DeclaratorContext::ObjCResult: // FIXME: special diagnostic here?
+ case DeclaratorContext::TypeName:
+ case DeclaratorContext::FunctionalCast:
+ case DeclaratorContext::CXXNew:
+ case DeclaratorContext::AliasDecl:
+ case DeclaratorContext::AliasTemplate:
+ case DeclaratorContext::Member:
+ case DeclaratorContext::Block:
+ case DeclaratorContext::ForInit:
+ case DeclaratorContext::SelectionInit:
+ case DeclaratorContext::Condition:
+ case DeclaratorContext::CXXCatch:
+ case DeclaratorContext::ObjCCatch:
+ case DeclaratorContext::BlockLiteral:
+ case DeclaratorContext::LambdaExpr:
+ case DeclaratorContext::ConversionId:
+ case DeclaratorContext::TrailingReturn:
+ case DeclaratorContext::TrailingReturnVar:
+ case DeclaratorContext::TemplateArg:
+ case DeclaratorContext::TemplateTypeArg:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
S.Diag(D.getEllipsisLoc(),
@@ -5816,9 +5897,9 @@ namespace {
// Set info for the written builtin specifiers.
TL.getWrittenBuiltinSpecs() = DS.getWrittenBuiltinSpecs();
// Try to have a meaningful source location.
- if (TL.getWrittenSignSpec() != TSS_unspecified)
+ if (TL.getWrittenSignSpec() != TypeSpecifierSign::Unspecified)
TL.expandBuiltinRange(DS.getTypeSpecSignLoc());
- if (TL.getWrittenWidthSpec() != TSW_unspecified)
+ if (TL.getWrittenWidthSpec() != TypeSpecifierWidth::Unspecified)
TL.expandBuiltinRange(DS.getTypeSpecWidthRange());
}
}
@@ -6005,7 +6086,7 @@ namespace {
}
// Finally fill in MemberPointerLocInfo fields.
- TL.setStarLoc(SourceLocation::getFromRawEncoding(Chunk.Mem.StarLoc));
+ TL.setStarLoc(Chunk.Mem.StarLoc);
TL.setClassTInfo(ClsTInfo);
}
void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
@@ -6054,6 +6135,17 @@ namespace {
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
TL.setExpansionLoc(Chunk.Loc);
}
+ void VisitVectorTypeLoc(VectorTypeLoc TL) { TL.setNameLoc(Chunk.Loc); }
+ void VisitDependentVectorTypeLoc(DependentVectorTypeLoc TL) {
+ TL.setNameLoc(Chunk.Loc);
+ }
+ void VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
+ TL.setNameLoc(Chunk.Loc);
+ }
+ void
+ VisitDependentSizedExtVectorTypeLoc(DependentSizedExtVectorTypeLoc TL) {
+ TL.setNameLoc(Chunk.Loc);
+ }
void VisitTypeLoc(TypeLoc TL) {
llvm_unreachable("unsupported TypeLoc kind in declarator!");
@@ -6071,7 +6163,7 @@ static void fillAtomicQualLoc(AtomicTypeLoc ATL, const DeclaratorChunk &Chunk) {
llvm_unreachable("cannot be _Atomic qualified");
case DeclaratorChunk::Pointer:
- Loc = SourceLocation::getFromRawEncoding(Chunk.Ptr.AtomicQualLoc);
+ Loc = Chunk.Ptr.AtomicQualLoc;
break;
case DeclaratorChunk::BlockPointer:
@@ -6224,9 +6316,9 @@ TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
// to apply them to the actual parameter declaration.
// Likewise, we don't want to do this for alias declarations, because
// we are actually going to build a declaration from this eventually.
- if (D.getContext() != DeclaratorContext::ObjCParameterContext &&
- D.getContext() != DeclaratorContext::AliasDeclContext &&
- D.getContext() != DeclaratorContext::AliasTemplateContext)
+ if (D.getContext() != DeclaratorContext::ObjCParameter &&
+ D.getContext() != DeclaratorContext::AliasDecl &&
+ D.getContext() != DeclaratorContext::AliasTemplate)
checkUnusedDeclAttributes(D);
if (getLangOpts().CPlusPlus) {
@@ -6254,13 +6346,15 @@ static bool BuildAddressSpaceIndex(Sema &S, LangAS &ASIdx,
const Expr *AddrSpace,
SourceLocation AttrLoc) {
if (!AddrSpace->isValueDependent()) {
- llvm::APSInt addrSpace(32);
- if (!AddrSpace->isIntegerConstantExpr(addrSpace, S.Context)) {
+ Optional<llvm::APSInt> OptAddrSpace =
+ AddrSpace->getIntegerConstantExpr(S.Context);
+ if (!OptAddrSpace) {
S.Diag(AttrLoc, diag::err_attribute_argument_type)
<< "'address_space'" << AANT_ArgumentIntegerConstant
<< AddrSpace->getSourceRange();
return false;
}
+ llvm::APSInt &addrSpace = *OptAddrSpace;
// Bounds checking.
if (addrSpace.isSigned()) {
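This hunk shows the pattern the commit applies throughout: the out-parameter form of isIntegerConstantExpr is replaced by Expr::getIntegerConstantExpr, which returns an Optional<llvm::APSInt>. A minimal sketch of the new call shape, assuming a Sema S and an argument expression E as in the surrounding code:

    // Sketch only: evaluating an attribute argument as an ICE with the
    // Optional-returning API used in the hunk above.
    if (Optional<llvm::APSInt> Value = E->getIntegerConstantExpr(S.Context)) {
      llvm::APSInt &N = *Value; // success: unwrap and bounds-check N
      (void)N;
    } else {
      // not an integer constant expression: diagnose and bail out
    }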
@@ -6353,25 +6447,7 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
return;
}
- Expr *ASArgExpr;
- if (Attr.isArgIdent(0)) {
- // Special case where the argument is a template id.
- CXXScopeSpec SS;
- SourceLocation TemplateKWLoc;
- UnqualifiedId id;
- id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
-
- ExprResult AddrSpace = S.ActOnIdExpression(
- S.getCurScope(), SS, TemplateKWLoc, id, /*HasTrailingLParen=*/false,
- /*IsAddressOfOperand=*/false);
- if (AddrSpace.isInvalid())
- return;
-
- ASArgExpr = static_cast<Expr *>(AddrSpace.get());
- } else {
- ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
- }
-
+ Expr *ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
LangAS ASIdx;
if (!BuildAddressSpaceIndex(S, ASIdx, ASArgExpr, Attr.getLoc())) {
Attr.setInvalid();
@@ -6858,32 +6934,32 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
break;
}
- llvm::SmallSet<attr::Kind, 2> Attrs;
+ std::bitset<attr::LastAttr> Attrs;
attr::Kind NewAttrKind = A->getKind();
QualType Desugared = Type;
const AttributedType *AT = dyn_cast<AttributedType>(Type);
while (AT) {
- Attrs.insert(AT->getAttrKind());
+ Attrs[AT->getAttrKind()] = true;
Desugared = AT->getModifiedType();
AT = dyn_cast<AttributedType>(Desugared);
}
// You cannot specify duplicate type attributes, so if the attribute has
// already been applied, flag it.
- if (Attrs.count(NewAttrKind)) {
+ if (Attrs[NewAttrKind]) {
S.Diag(PAttr.getLoc(), diag::warn_duplicate_attribute_exact) << PAttr;
return true;
}
- Attrs.insert(NewAttrKind);
+ Attrs[NewAttrKind] = true;
// You cannot have both __sptr and __uptr on the same type, nor can you
// have __ptr32 and __ptr64.
- if (Attrs.count(attr::Ptr32) && Attrs.count(attr::Ptr64)) {
+ if (Attrs[attr::Ptr32] && Attrs[attr::Ptr64]) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__ptr32'"
<< "'__ptr64'";
return true;
- } else if (Attrs.count(attr::SPtr) && Attrs.count(attr::UPtr)) {
+ } else if (Attrs[attr::SPtr] && Attrs[attr::UPtr]) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__sptr'"
<< "'__uptr'";
@@ -6909,12 +6985,12 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
LangAS ASIdx = LangAS::Default;
uint64_t PtrWidth = S.Context.getTargetInfo().getPointerWidth(0);
if (PtrWidth == 32) {
- if (Attrs.count(attr::Ptr64))
+ if (Attrs[attr::Ptr64])
ASIdx = LangAS::ptr64;
- else if (Attrs.count(attr::UPtr))
+ else if (Attrs[attr::UPtr])
ASIdx = LangAS::ptr32_uptr;
- } else if (PtrWidth == 64 && Attrs.count(attr::Ptr32)) {
- if (Attrs.count(attr::UPtr))
+ } else if (PtrWidth == 64 && Attrs[attr::Ptr32]) {
+ if (Attrs[attr::UPtr])
ASIdx = LangAS::ptr32_uptr;
else
ASIdx = LangAS::ptr32_sptr;
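The width/sign mapping above turns Microsoft pointer qualifiers into address spaces. An illustrative user-level view, assuming a 64-bit Windows target compiled with -fms-extensions:

    int * __ptr32 a;        // 32-bit pointer, sign-extended -> LangAS::ptr32_sptr
    int * __uptr __ptr32 b; // 32-bit pointer, zero-extended -> LangAS::ptr32_uptr
    int * __ptr64 c;        // explicit 64-bit pointer       -> LangAS::ptr64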
@@ -6937,6 +7013,9 @@ static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
case ParsedAttr::AT_TypeNullable:
return NullabilityKind::Nullable;
+ case ParsedAttr::AT_TypeNullableResult:
+ return NullabilityKind::NullableResult;
+
case ParsedAttr::AT_TypeNullUnspecified:
return NullabilityKind::Unspecified;
@@ -7574,25 +7653,7 @@ static void HandleVectorSizeAttr(QualType &CurType, const ParsedAttr &Attr,
return;
}
- Expr *SizeExpr;
- // Special case where the argument is a template id.
- if (Attr.isArgIdent(0)) {
- CXXScopeSpec SS;
- SourceLocation TemplateKWLoc;
- UnqualifiedId Id;
- Id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
-
- ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
- Id, /*HasTrailingLParen=*/false,
- /*IsAddressOfOperand=*/false);
-
- if (Size.isInvalid())
- return;
- SizeExpr = Size.get();
- } else {
- SizeExpr = Attr.getArgAsExpr(0);
- }
-
+ Expr *SizeExpr = Attr.getArgAsExpr(0);
QualType T = S.BuildVectorType(CurType, SizeExpr, Attr.getLoc());
if (!T.isNull())
CurType = T;
@@ -7611,28 +7672,8 @@ static void HandleExtVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
return;
}
- Expr *sizeExpr;
-
- // Special case where the argument is a template id.
- if (Attr.isArgIdent(0)) {
- CXXScopeSpec SS;
- SourceLocation TemplateKWLoc;
- UnqualifiedId id;
- id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
-
- ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
- id, /*HasTrailingLParen=*/false,
- /*IsAddressOfOperand=*/false);
- if (Size.isInvalid())
- return;
-
- sizeExpr = Size.get();
- } else {
- sizeExpr = Attr.getArgAsExpr(0);
- }
-
- // Create the vector type.
- QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc());
+ Expr *SizeExpr = Attr.getArgAsExpr(0);
+ QualType T = S.BuildExtVectorType(CurType, SizeExpr, Attr.getLoc());
if (!T.isNull())
CurType = T;
}
@@ -7686,6 +7727,22 @@ static bool isPermittedNeonBaseType(QualType &Ty,
BTy->getKind() == BuiltinType::BFloat16;
}
+static bool verifyValidIntegerConstantExpr(Sema &S, const ParsedAttr &Attr,
+ llvm::APSInt &Result) {
+ const auto *AttrExpr = Attr.getArgAsExpr(0);
+ if (!AttrExpr->isTypeDependent() && !AttrExpr->isValueDependent()) {
+ if (Optional<llvm::APSInt> Res =
+ AttrExpr->getIntegerConstantExpr(S.Context)) {
+ Result = *Res;
+ return true;
+ }
+ }
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr << AANT_ArgumentIntegerConstant << AttrExpr->getSourceRange();
+ Attr.setInvalid();
+ return false;
+}
+
/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
/// "neon_polyvector_type" attributes are used to create vector types that
/// are mangled according to ARM's ABI. Otherwise, these types are identical
@@ -7699,7 +7756,8 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
// not to need a separate attribute)
if (!S.Context.getTargetInfo().hasFeature("neon") &&
!S.Context.getTargetInfo().hasFeature("mve")) {
- S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr;
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
+ << Attr << "'neon' or 'mve'";
Attr.setInvalid();
return;
}
@@ -7711,16 +7769,10 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
return;
}
// The number of elements must be an ICE.
- Expr *numEltsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
llvm::APSInt numEltsInt(32);
- if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() ||
- !numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
- << Attr << AANT_ArgumentIntegerConstant
- << numEltsExpr->getSourceRange();
- Attr.setInvalid();
+ if (!verifyValidIntegerConstantExpr(S, Attr, numEltsInt))
return;
- }
+
// Only certain element types are supported for Neon vectors.
if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
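With verifyValidIntegerConstantExpr factored out, the dependence check, the diagnostic, and Attr.setInvalid() live in one place, and each caller shrinks to the shape below (a sketch matching the hunk above):

    llvm::APSInt NumEltsInt(32);
    if (!verifyValidIntegerConstantExpr(S, Attr, NumEltsInt))
      return; // the helper has already diagnosed and invalidated Attr
    unsigned NumElts = NumEltsInt.getZExtValue();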
@@ -7741,6 +7793,71 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
CurType = S.Context.getVectorType(CurType, numElts, VecKind);
}
+/// HandleArmSveVectorBitsTypeAttr - The "arm_sve_vector_bits" attribute is
+/// used to create fixed-length versions of sizeless SVE types defined by
+/// the ACLE, such as svint32_t and svbool_t.
+static void HandleArmSveVectorBitsTypeAttr(QualType &CurType, ParsedAttr &Attr,
+ Sema &S) {
+ // Target must have SVE.
+ if (!S.Context.getTargetInfo().hasFeature("sve")) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr << "'sve'";
+ Attr.setInvalid();
+ return;
+ }
+
+ // Attribute is unsupported if '-msve-vector-bits=<bits>' isn't specified.
+ if (!S.getLangOpts().ArmSveVectorBits) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_arm_feature_sve_bits_unsupported)
+ << Attr;
+ Attr.setInvalid();
+ return;
+ }
+
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 1;
+ Attr.setInvalid();
+ return;
+ }
+
+ // The vector size must be an integer constant expression.
+ llvm::APSInt SveVectorSizeInBits(32);
+ if (!verifyValidIntegerConstantExpr(S, Attr, SveVectorSizeInBits))
+ return;
+
+ unsigned VecSize = static_cast<unsigned>(SveVectorSizeInBits.getZExtValue());
+
+ // The attribute vector size must match -msve-vector-bits.
+ if (VecSize != S.getLangOpts().ArmSveVectorBits) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_bad_sve_vector_size)
+ << VecSize << S.getLangOpts().ArmSveVectorBits;
+ Attr.setInvalid();
+ return;
+ }
+
+ // Attribute can only be attached to a single SVE vector or predicate type.
+ if (!CurType->isVLSTBuiltinType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_sve_type)
+ << Attr << CurType;
+ Attr.setInvalid();
+ return;
+ }
+
+ const auto *BT = CurType->castAs<BuiltinType>();
+
+ QualType EltType = CurType->getSveEltType(S.Context);
+ unsigned TypeSize = S.Context.getTypeSize(EltType);
+ VectorType::VectorKind VecKind = VectorType::SveFixedLengthDataVector;
+ if (BT->getKind() == BuiltinType::SveBool) {
+ // Predicates are represented as i8.
+ VecSize /= S.Context.getCharWidth() * S.Context.getCharWidth();
+ VecKind = VectorType::SveFixedLengthPredicateVector;
+ } else
+ VecSize /= TypeSize;
+ CurType = S.Context.getVectorType(EltType, VecSize, VecKind);
+}
+
static void HandleArmMveStrictPolymorphismAttr(TypeProcessingState &State,
QualType &CurType,
ParsedAttr &Attr) {
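The lane-count arithmetic in HandleArmSveVectorBitsTypeAttr above is easiest to check with concrete numbers; a worked example assuming -msve-vector-bits=512:

    unsigned VecSize = 512;                 // assumed -msve-vector-bits value
    unsigned DataLanes = VecSize / 32;      // svint32_t -> 16 x i32
    unsigned PredLanes = VecSize / (8 * 8); // svbool_t  ->  8 x i8
    // A predicate holds one bit per data byte, and each i8 lane packs 8 of
    // those bits, hence the division by getCharWidth() twice in the hunk.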
@@ -7808,7 +7925,7 @@ static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
diag::note_opencl_typedef_access_qualifier) << PrevAccessQual;
} else if (CurType->isPipeType()) {
if (Attr.getSemanticSpelling() == OpenCLAccessAttr::Keyword_write_only) {
- QualType ElemType = CurType->getAs<PipeType>()->getElementType();
+ QualType ElemType = CurType->castAs<PipeType>()->getElementType();
CurType = S.Context.getWritePipeType(ElemType);
}
}
@@ -7828,49 +7945,8 @@ static void HandleMatrixTypeAttr(QualType &CurType, const ParsedAttr &Attr,
return;
}
- Expr *RowsExpr = nullptr;
- Expr *ColsExpr = nullptr;
-
- // TODO: Refactor parameter extraction into separate function
- // Get the number of rows
- if (Attr.isArgIdent(0)) {
- CXXScopeSpec SS;
- SourceLocation TemplateKeywordLoc;
- UnqualifiedId id;
- id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
- ExprResult Rows = S.ActOnIdExpression(S.getCurScope(), SS,
- TemplateKeywordLoc, id, false, false);
-
- if (Rows.isInvalid())
- // TODO: maybe a good error message would be nice here
- return;
- RowsExpr = Rows.get();
- } else {
- assert(Attr.isArgExpr(0) &&
- "Argument to should either be an identity or expression");
- RowsExpr = Attr.getArgAsExpr(0);
- }
-
- // Get the number of columns
- if (Attr.isArgIdent(1)) {
- CXXScopeSpec SS;
- SourceLocation TemplateKeywordLoc;
- UnqualifiedId id;
- id.setIdentifier(Attr.getArgAsIdent(1)->Ident, Attr.getLoc());
- ExprResult Columns = S.ActOnIdExpression(
- S.getCurScope(), SS, TemplateKeywordLoc, id, false, false);
-
- if (Columns.isInvalid())
- // TODO: a good error message would be nice here
- return;
- RowsExpr = Columns.get();
- } else {
- assert(Attr.isArgExpr(1) &&
- "Argument to should either be an identity or expression");
- ColsExpr = Attr.getArgAsExpr(1);
- }
-
- // Create the matrix type.
+ Expr *RowsExpr = Attr.getArgAsExpr(0);
+ Expr *ColsExpr = Attr.getArgAsExpr(1);
QualType T = S.BuildMatrixType(CurType, RowsExpr, ColsExpr, Attr.getLoc());
if (!T.isNull())
CurType = T;
@@ -7894,6 +7970,8 @@ static bool isAddressSpaceKind(const ParsedAttr &attr) {
return attrKind == ParsedAttr::AT_AddressSpace ||
attrKind == ParsedAttr::AT_OpenCLPrivateAddressSpace ||
attrKind == ParsedAttr::AT_OpenCLGlobalAddressSpace ||
+ attrKind == ParsedAttr::AT_OpenCLGlobalDeviceAddressSpace ||
+ attrKind == ParsedAttr::AT_OpenCLGlobalHostAddressSpace ||
attrKind == ParsedAttr::AT_OpenCLLocalAddressSpace ||
attrKind == ParsedAttr::AT_OpenCLConstantAddressSpace ||
attrKind == ParsedAttr::AT_OpenCLGenericAddressSpace;
@@ -7961,7 +8039,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk)
state.getSema().Diag(attr.getLoc(),
diag::warn_unknown_attribute_ignored)
- << attr;
+ << attr << attr.getRange();
break;
case ParsedAttr::IgnoredAttribute:
@@ -7974,6 +8052,8 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
case ParsedAttr::AT_OpenCLPrivateAddressSpace:
case ParsedAttr::AT_OpenCLGlobalAddressSpace:
+ case ParsedAttr::AT_OpenCLGlobalDeviceAddressSpace:
+ case ParsedAttr::AT_OpenCLGlobalHostAddressSpace:
case ParsedAttr::AT_OpenCLLocalAddressSpace:
case ParsedAttr::AT_OpenCLConstantAddressSpace:
case ParsedAttr::AT_OpenCLGenericAddressSpace:
@@ -8004,6 +8084,10 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
VectorType::NeonPolyVector);
attr.setUsedAsTypeAttr();
break;
+ case ParsedAttr::AT_ArmSveVectorBits:
+ HandleArmSveVectorBitsTypeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
case ParsedAttr::AT_ArmMveStrictPolymorphism: {
HandleArmMveStrictPolymorphismAttr(state, type, attr);
attr.setUsedAsTypeAttr();
@@ -8192,6 +8276,20 @@ void Sema::completeExprArrayBound(Expr *E) {
}
}
+QualType Sema::getCompletedType(Expr *E) {
+ // Incomplete array types may be completed by the initializer attached to
+ // their definitions. For static data members of class templates and for
+ // variable templates, we need to instantiate the definition to get this
+ // initializer and complete the type.
+ if (E->getType()->isIncompleteArrayType())
+ completeExprArrayBound(E);
+
+ // FIXME: Are there other cases which require instantiating something other
+ // than the type to complete the type of an expression?
+
+ return E->getType();
+}
+
/// Ensure that the type of the given expression is complete.
///
/// This routine checks whether the expression \p E has a complete type. If the
@@ -8209,21 +8307,8 @@ void Sema::completeExprArrayBound(Expr *E) {
/// otherwise.
bool Sema::RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser) {
- QualType T = E->getType();
-
- // Incomplete array types may be completed by the initializer attached to
- // their definitions. For static data members of class templates and for
- // variable templates, we need to instantiate the definition to get this
- // initializer and complete the type.
- if (T->isIncompleteArrayType()) {
- completeExprArrayBound(E);
- T = E->getType();
- }
-
- // FIXME: Are there other cases which require instantiating something other
- // than the type to complete the type of an expression?
-
- return RequireCompleteType(E->getExprLoc(), T, Kind, Diagnoser);
+ return RequireCompleteType(E->getExprLoc(), getCompletedType(E), Kind,
+ Diagnoser);
}
bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
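Extracting Sema::getCompletedType lets callers complete an expression's array bound without also demanding a complete type. A hedged sketch of the call-site pattern the refactor enables:

    // Sketch: complete the type if an initializer allows it, but do not
    // emit the "incomplete type" diagnostic RequireCompleteExprType would.
    QualType T = S.getCompletedType(E);
    if (!T->isIncompleteType()) {
      // safe to query size/layout of T here
    }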
@@ -8747,7 +8832,17 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// C++11 [dcl.type.simple]p4:
// The type denoted by decltype(e) is defined as follows:
- //
+
+ // C++20:
+ // - if E is an unparenthesized id-expression naming a non-type
+ // template-parameter (13.2), decltype(E) is the type of the
+  //   template-parameter after performing any necessary type deduction.
+  //   Note that this does not pick up the implicit 'const' for a template
+  //   parameter object. This rule makes no difference before C++20, so we
+  //   apply it unconditionally.
+ if (const auto *SNTTPE = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ return SNTTPE->getParameterType(S.Context);
+
// - if e is an unparenthesized id-expression or an unparenthesized class
// member access (5.2.5), decltype(e) is the type of the entity named
// by e. If there is no such entity, or if e names a set of overloaded
@@ -8756,6 +8851,8 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// We apply the same rules for Objective-C ivar and property references.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
const ValueDecl *VD = DRE->getDecl();
+ if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(VD))
+ return TPO->getType().getUnqualifiedType();
return VD->getType();
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
if (const ValueDecl *VD = ME->getMemberDecl())
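A user-level illustration of the C++20 rule these two hunks implement: the template parameter object for a class-type non-type template parameter is const, but an unparenthesized decltype sees the declared parameter type:

    #include <type_traits>
    struct A { int n; };
    template <A a> void f() {
      static_assert(std::is_same_v<decltype(a), A>);           // id-expression
      static_assert(std::is_same_v<decltype((a)), const A &>); // parenthesized
    }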
@@ -8813,9 +8910,11 @@ QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
assert(!E->hasPlaceholderType() && "unexpected placeholder");
if (AsUnevaluated && CodeSynthesisContexts.empty() &&
- E->HasSideEffects(Context, false)) {
+ !E->isInstantiationDependent() && E->HasSideEffects(Context, false)) {
// The expression operand for decltype is in an unevaluated expression
// context, so side effects could result in unintended consequences.
+ // Exclude instantiation-dependent expressions, because 'decltype' is often
+ // used to build SFINAE gadgets.
Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
}
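A typical instantiation-dependent gadget the relaxed warning is aimed at; the call syntactically has side effects but sits in an unevaluated operand (an illustrative detection idiom, not code from the commit):

    #include <type_traits>
    #include <utility>
    template <typename T>
    auto can_push_back(int)
        -> decltype(std::declval<T &>().push_back(0), std::true_type{});
    template <typename> std::false_type can_push_back(...);
    // decltype(can_push_back<std::vector<int>>(0))::value == true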
@@ -8842,7 +8941,7 @@ QualType Sema::BuildUnaryTransformType(QualType BaseType,
return QualType();
}
- EnumDecl *ED = BaseType->getAs<EnumType>()->getDecl();
+ EnumDecl *ED = BaseType->castAs<EnumType>()->getDecl();
assert(ED && "EnumType has no EnumDecl");
DiagnoseUseOfDecl(ED, Loc);
@@ -8880,11 +8979,8 @@ QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
else if (!T.isTriviallyCopyableType(Context))
// Some other non-trivially-copyable type (probably a C++ class)
DisallowedKind = 7;
- else if (auto *ExtTy = T->getAs<ExtIntType>()) {
- if (ExtTy->getNumBits() < 8)
+ else if (T->isExtIntType()) {
DisallowedKind = 8;
- else if (!llvm::isPowerOf2_32(ExtTy->getNumBits()))
- DisallowedKind = 9;
}
if (DisallowedKind != -1) {
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index ae0e9f1119b4..0a596e50658b 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -28,6 +28,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/Basic/DiagnosticParse.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Designator.h"
#include "clang/Sema/Lookup.h"
@@ -730,10 +731,11 @@ public:
#define ABSTRACT_STMT(Stmt)
#include "clang/AST/StmtNodes.inc"
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
- LLVM_ATTRIBUTE_NOINLINE \
- OMPClause *Transform ## Class(Class *S);
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) \
+ LLVM_ATTRIBUTE_NOINLINE \
+ OMPClause *Transform##Class(Class *S);
+#include "llvm/Frontend/OpenMP/OMP.inc"
/// Build a new qualified type given its unqualified type and type location.
///
@@ -1304,7 +1306,7 @@ public:
return SemaRef.ActOnLabelStmt(IdentLoc, L, ColonLoc, SubStmt);
}
- /// Build a new label statement.
+ /// Build a new attributed statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1319,19 +1321,23 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
- Sema::ConditionResult Cond, Stmt *Init, Stmt *Then,
+ SourceLocation LParenLoc, Sema::ConditionResult Cond,
+ SourceLocation RParenLoc, Stmt *Init, Stmt *Then,
SourceLocation ElseLoc, Stmt *Else) {
- return getSema().ActOnIfStmt(IfLoc, IsConstexpr, Init, Cond, Then,
- ElseLoc, Else);
+ return getSema().ActOnIfStmt(IfLoc, IsConstexpr, LParenLoc, Init, Cond,
+ RParenLoc, Then, ElseLoc, Else);
}
/// Start building a new switch statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildSwitchStmtStart(SourceLocation SwitchLoc, Stmt *Init,
- Sema::ConditionResult Cond) {
- return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Init, Cond);
+ StmtResult RebuildSwitchStmtStart(SourceLocation SwitchLoc,
+ SourceLocation LParenLoc, Stmt *Init,
+ Sema::ConditionResult Cond,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnStartOfSwitchStmt(SwitchLoc, LParenLoc, Init, Cond,
+ RParenLoc);
}
/// Attach the body to the switch statement.
@@ -2006,26 +2012,32 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPToClause(ArrayRef<Expr *> VarList,
- CXXScopeSpec &MapperIdScopeSpec,
- DeclarationNameInfo &MapperId,
- const OMPVarListLocTy &Locs,
- ArrayRef<Expr *> UnresolvedMappers) {
- return getSema().ActOnOpenMPToClause(VarList, MapperIdScopeSpec, MapperId,
- Locs, UnresolvedMappers);
+ OMPClause *
+ RebuildOMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ ArrayRef<SourceLocation> MotionModifiersLoc,
+ CXXScopeSpec &MapperIdScopeSpec,
+ DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> UnresolvedMappers) {
+ return getSema().ActOnOpenMPToClause(MotionModifiers, MotionModifiersLoc,
+ MapperIdScopeSpec, MapperId, ColonLoc,
+ VarList, Locs, UnresolvedMappers);
}
/// Build a new OpenMP 'from' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPFromClause(ArrayRef<Expr *> VarList,
- CXXScopeSpec &MapperIdScopeSpec,
- DeclarationNameInfo &MapperId,
- const OMPVarListLocTy &Locs,
- ArrayRef<Expr *> UnresolvedMappers) {
- return getSema().ActOnOpenMPFromClause(VarList, MapperIdScopeSpec, MapperId,
- Locs, UnresolvedMappers);
+ OMPClause *
+ RebuildOMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ ArrayRef<SourceLocation> MotionModifiersLoc,
+ CXXScopeSpec &MapperIdScopeSpec,
+ DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> UnresolvedMappers) {
+ return getSema().ActOnOpenMPFromClause(
+ MotionModifiers, MotionModifiersLoc, MapperIdScopeSpec, MapperId,
+ ColonLoc, VarList, Locs, UnresolvedMappers);
}
/// Build a new OpenMP 'use_device_ptr' clause.
@@ -3500,7 +3512,8 @@ public:
// Build the CallExpr
ExprResult TheCall = CallExpr::Create(
SemaRef.Context, Callee, SubExprs, Builtin->getCallResultType(),
- Expr::getValueKindForType(Builtin->getReturnType()), RParenLoc);
+ Expr::getValueKindForType(Builtin->getReturnType()), RParenLoc,
+ FPOptionsOverride());
// Type-check the __builtin_shufflevector expression.
return SemaRef.SemaBuiltinShuffleVector(cast<CallExpr>(TheCall.get()));
@@ -3534,12 +3547,12 @@ public:
}
case TemplateArgument::Template:
- return TemplateArgumentLoc(TemplateArgument(
- Pattern.getArgument().getAsTemplate(),
- NumExpansions),
- Pattern.getTemplateQualifierLoc(),
- Pattern.getTemplateNameLoc(),
- EllipsisLoc);
+ return TemplateArgumentLoc(
+ SemaRef.Context,
+ TemplateArgument(Pattern.getArgument().getAsTemplate(),
+ NumExpansions),
+ Pattern.getTemplateQualifierLoc(), Pattern.getTemplateNameLoc(),
+ EllipsisLoc);
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -3576,13 +3589,15 @@ public:
///
/// By default, performs semantic analysis in order to build a new fold
/// expression.
- ExprResult RebuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ ExprResult RebuildCXXFoldExpr(UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions) {
- return getSema().BuildCXXFoldExpr(LParenLoc, LHS, Operator, EllipsisLoc,
- RHS, RParenLoc, NumExpansions);
+ return getSema().BuildCXXFoldExpr(ULE, LParenLoc, LHS, Operator,
+ EllipsisLoc, RHS, RParenLoc,
+ NumExpansions);
}
/// Build an empty C++1z fold-expression with the given operator.
@@ -3609,8 +3624,8 @@ public:
}
ExprResult RebuildRecoveryExpr(SourceLocation BeginLoc, SourceLocation EndLoc,
- ArrayRef<Expr *> SubExprs) {
- return getSema().CreateRecoveryExpr(BeginLoc, EndLoc, SubExprs);
+ ArrayRef<Expr *> SubExprs, QualType Type) {
+ return getSema().CreateRecoveryExpr(BeginLoc, EndLoc, SubExprs, Type);
}
private:
@@ -3677,10 +3692,11 @@ OMPClause *TreeTransform<Derived>::TransformOMPClause(OMPClause *S) {
switch (S->getClauseKind()) {
default: break;
// Transform individual clause nodes
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) \
case Enum: \
- return getDerived().Transform ## Class(cast<Class>(S));
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ return getDerived().Transform##Class(cast<Class>(S));
+#include "llvm/Frontend/OpenMP/OMP.inc"
}
return S;
@@ -4275,8 +4291,8 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
if (Template.isNull())
return true;
- Output = TemplateArgumentLoc(TemplateArgument(Template), QualifierLoc,
- Input.getTemplateNameLoc());
+ Output = TemplateArgumentLoc(SemaRef.Context, TemplateArgument(Template),
+ QualifierLoc, Input.getTemplateNameLoc());
return false;
}
@@ -5162,7 +5178,7 @@ template <typename Derived>
QualType TreeTransform<Derived>::TransformDependentVectorType(
TypeLocBuilder &TLB, DependentVectorTypeLoc TL) {
const DependentVectorType *T = TL.getTypePtr();
- QualType ElementType = getDerived().TransformType(T->getElementType());
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -5203,7 +5219,7 @@ QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
const DependentSizedExtVectorType *T = TL.getTypePtr();
// FIXME: ext vector locs should be nested
- QualType ElementType = getDerived().TransformType(T->getElementType());
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -5370,7 +5386,7 @@ template <typename Derived>
QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
VectorTypeLoc TL) {
const VectorType *T = TL.getTypePtr();
- QualType ElementType = getDerived().TransformType(T->getElementType());
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -5393,7 +5409,7 @@ template<typename Derived>
QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB,
ExtVectorTypeLoc TL) {
const VectorType *T = TL.getTypePtr();
- QualType ElementType = getDerived().TransformType(T->getElementType());
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -5465,6 +5481,7 @@ ParmVarDecl *TreeTransform<Derived>::TransformFunctionTypeParam(
/* DefArg */ nullptr);
newParm->setScopeInfo(OldParm->getFunctionScopeDepth(),
OldParm->getFunctionScopeIndex() + indexAdjustment);
+ transformedLocalDecl(OldParm, {newParm});
return newParm;
}
@@ -7280,9 +7297,9 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
Else.get() == S->getElse())
return S;
- return getDerived().RebuildIfStmt(S->getIfLoc(), S->isConstexpr(), Cond,
- Init.get(), Then.get(), S->getElseLoc(),
- Else.get());
+ return getDerived().RebuildIfStmt(
+ S->getIfLoc(), S->isConstexpr(), S->getLParenLoc(), Cond,
+ S->getRParenLoc(), Init.get(), Then.get(), S->getElseLoc(), Else.get());
}
template<typename Derived>
@@ -7301,8 +7318,9 @@ TreeTransform<Derived>::TransformSwitchStmt(SwitchStmt *S) {
return StmtError();
// Rebuild the switch statement.
- StmtResult Switch
- = getDerived().RebuildSwitchStmtStart(S->getSwitchLoc(), Init.get(), Cond);
+ StmtResult Switch =
+ getDerived().RebuildSwitchStmtStart(S->getSwitchLoc(), S->getLParenLoc(),
+ Init.get(), Cond, S->getRParenLoc());
if (Switch.isInvalid())
return StmtError();
@@ -8323,7 +8341,14 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
StmtResult Body;
{
Sema::CompoundScopeRAII CompoundScope(getSema());
- Stmt *CS = D->getInnermostCapturedStmt()->getCapturedStmt();
+ Stmt *CS;
+ if (D->getDirectiveKind() == OMPD_atomic ||
+ D->getDirectiveKind() == OMPD_critical ||
+ D->getDirectiveKind() == OMPD_section ||
+ D->getDirectiveKind() == OMPD_master)
+ CS = D->getAssociatedStmt();
+ else
+ CS = D->getInnermostCapturedStmt()->getCapturedStmt();
Body = getDerived().TransformStmt(CS);
}
AssociatedStmt =
@@ -9737,8 +9762,9 @@ OMPClause *TreeTransform<Derived>::TransformOMPToClause(OMPToClause *C) {
if (transformOMPMappableExprListClause<Derived, OMPToClause>(
*this, C, Vars, MapperIdScopeSpec, MapperIdInfo, UnresolvedMappers))
return nullptr;
- return getDerived().RebuildOMPToClause(Vars, MapperIdScopeSpec, MapperIdInfo,
- Locs, UnresolvedMappers);
+ return getDerived().RebuildOMPToClause(
+ C->getMotionModifiers(), C->getMotionModifiersLoc(), MapperIdScopeSpec,
+ MapperIdInfo, C->getColonLoc(), Vars, Locs, UnresolvedMappers);
}
template <typename Derived>
@@ -9752,7 +9778,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPFromClause(OMPFromClause *C) {
*this, C, Vars, MapperIdScopeSpec, MapperIdInfo, UnresolvedMappers))
return nullptr;
return getDerived().RebuildOMPFromClause(
- Vars, MapperIdScopeSpec, MapperIdInfo, Locs, UnresolvedMappers);
+ C->getMotionModifiers(), C->getMotionModifiersLoc(), MapperIdScopeSpec,
+ MapperIdInfo, C->getColonLoc(), Vars, Locs, UnresolvedMappers);
}
template <typename Derived>
@@ -10198,7 +10225,7 @@ ExprResult TreeTransform<Derived>::TransformRecoveryExpr(RecoveryExpr *E) {
if (!getDerived().AlwaysRebuild() && !Changed)
return E;
return getDerived().RebuildRecoveryExpr(E->getBeginLoc(), E->getEndLoc(),
- Children);
+ Children, E->getType());
}
template<typename Derived>
@@ -10466,6 +10493,15 @@ TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
// FIXME: Wrong source location information for the '('.
SourceLocation FakeLParenLoc
= ((Expr *)Callee.get())->getSourceRange().getBegin();
+
+ Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
+ if (E->hasStoredFPFeatures()) {
+ FPOptionsOverride NewOverrides = E->getFPFeatures();
+ getSema().CurFPFeatures =
+ NewOverrides.applyOverrides(getSema().getLangOpts());
+ getSema().FpPragmaStack.CurrentValue = NewOverrides;
+ }
+
return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
Args,
E->getRParenLoc());
@@ -10580,7 +10616,7 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
getSema().CurFPFeatures =
NewOverrides.applyOverrides(getSema().getLangOpts());
- getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
+ getSema().FpPragmaStack.CurrentValue = NewOverrides;
return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(),
LHS.get(), RHS.get());
}
@@ -10637,7 +10673,7 @@ TreeTransform<Derived>::TransformCompoundAssignOperator(
FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
getSema().CurFPFeatures =
NewOverrides.applyOverrides(getSema().getLangOpts());
- getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
+ getSema().FpPragmaStack.CurrentValue = NewOverrides;
return getDerived().TransformBinaryOperator(E);
}
@@ -11115,7 +11151,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
FPOptionsOverride NewOverrides(E->getFPFeatures());
getSema().CurFPFeatures =
NewOverrides.applyOverrides(getSema().getLangOpts());
- getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
+ getSema().FpPragmaStack.CurrentValue = NewOverrides;
return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
E->getOperatorLoc(),
@@ -12711,12 +12747,12 @@ TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
bool ArgumentChanged = false;
SmallVector<Expr*, 8> Args;
- Args.reserve(E->arg_size());
+ Args.reserve(E->getNumArgs());
{
EnterExpressionEvaluationContext Context(
getSema(), EnterExpressionEvaluationContext::InitList,
E->isListInitialization());
- if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
+ if (getDerived().TransformExprs(E->arg_begin(), E->getNumArgs(), true, Args,
&ArgumentChanged))
return ExprError();
}
@@ -13110,6 +13146,14 @@ TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
+ UnresolvedLookupExpr *Callee = nullptr;
+ if (Expr *OldCallee = E->getCallee()) {
+ ExprResult CalleeResult = getDerived().TransformExpr(OldCallee);
+ if (CalleeResult.isInvalid())
+ return ExprError();
+ Callee = cast<UnresolvedLookupExpr>(CalleeResult.get());
+ }
+
Expr *Pattern = E->getPattern();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
@@ -13149,8 +13193,20 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return E;
return getDerived().RebuildCXXFoldExpr(
- E->getBeginLoc(), LHS.get(), E->getOperator(), E->getEllipsisLoc(),
- RHS.get(), E->getEndLoc(), NumExpansions);
+ Callee, E->getBeginLoc(), LHS.get(), E->getOperator(),
+ E->getEllipsisLoc(), RHS.get(), E->getEndLoc(), NumExpansions);
+ }
+
+ // Formally a fold expression expands to nested parenthesized expressions.
+ // Enforce this limit to avoid creating trees so deep we can't safely traverse
+ // them.
+ if (NumExpansions && SemaRef.getLangOpts().BracketDepth < NumExpansions) {
+ SemaRef.Diag(E->getEllipsisLoc(),
+ clang::diag::err_fold_expression_limit_exceeded)
+ << *NumExpansions << SemaRef.getLangOpts().BracketDepth
+ << E->getSourceRange();
+ SemaRef.Diag(E->getEllipsisLoc(), diag::note_bracket_depth);
+ return ExprError();
}
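In user terms: expanding a fold over N pack elements nests roughly N parenthesized subexpressions, so packs larger than the -fbracket-depth limit (256 by default) are now rejected up front instead of producing a tree too deep to traverse safely:

    // Illustrative: sum() instantiated with more pack elements than the
    // bracket-depth limit now triggers err_fold_expression_limit_exceeded.
    template <typename... Ts> auto sum(Ts... xs) { return (xs + ... + 0); }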
// The transform has determined that we should perform an elementwise
@@ -13170,8 +13226,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return true;
Result = getDerived().RebuildCXXFoldExpr(
- E->getBeginLoc(), Out.get(), E->getOperator(), E->getEllipsisLoc(),
- Result.get(), E->getEndLoc(), OrigNumExpansions);
+ Callee, E->getBeginLoc(), Out.get(), E->getOperator(),
+ E->getEllipsisLoc(), Result.get(), E->getEndLoc(), OrigNumExpansions);
if (Result.isInvalid())
return true;
}
@@ -13186,16 +13242,21 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
if (Out.get()->containsUnexpandedParameterPack()) {
// We still have a pack; retain a pack expansion for this slice.
Result = getDerived().RebuildCXXFoldExpr(
- E->getBeginLoc(), LeftFold ? Result.get() : Out.get(),
+ Callee, E->getBeginLoc(), LeftFold ? Result.get() : Out.get(),
E->getOperator(), E->getEllipsisLoc(),
LeftFold ? Out.get() : Result.get(), E->getEndLoc(),
OrigNumExpansions);
} else if (Result.isUsable()) {
// We've got down to a single element; build a binary operator.
- Result = getDerived().RebuildBinaryOperator(
- E->getEllipsisLoc(), E->getOperator(),
- LeftFold ? Result.get() : Out.get(),
- LeftFold ? Out.get() : Result.get());
+ Expr *LHS = LeftFold ? Result.get() : Out.get();
+ Expr *RHS = LeftFold ? Out.get() : Result.get();
+ if (Callee)
+ Result = getDerived().RebuildCXXOperatorCallExpr(
+ BinaryOperator::getOverloadedOperator(E->getOperator()),
+ E->getEllipsisLoc(), Callee, LHS, RHS);
+ else
+ Result = getDerived().RebuildBinaryOperator(E->getEllipsisLoc(),
+ E->getOperator(), LHS, RHS);
} else
Result = Out;
@@ -13213,8 +13274,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return true;
Result = getDerived().RebuildCXXFoldExpr(
- E->getBeginLoc(), Result.get(), E->getOperator(), E->getEllipsisLoc(),
- Out.get(), E->getEndLoc(), OrigNumExpansions);
+ Callee, E->getBeginLoc(), Result.get(), E->getOperator(),
+ E->getEllipsisLoc(), Out.get(), E->getEndLoc(), OrigNumExpansions);
if (Result.isInvalid())
return true;
}
diff --git a/clang/lib/Sema/UsedDeclVisitor.h b/clang/lib/Sema/UsedDeclVisitor.h
index d207e07f451a..c33d30478e2a 100644
--- a/clang/lib/Sema/UsedDeclVisitor.h
+++ b/clang/lib/Sema/UsedDeclVisitor.h
@@ -67,10 +67,13 @@ public:
void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
if (E->getOperatorDelete())
asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorDelete());
- QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
- if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ QualType DestroyedOrNull = E->getDestroyedType();
+ if (!DestroyedOrNull.isNull()) {
+ QualType Destroyed = S.Context.getBaseElementType(DestroyedOrNull);
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ }
}
Inherited::VisitCXXDeleteExpr(E);
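The extra null check matters because CXXDeleteExpr::getDestroyedType() can return a null QualType, for example while the operand's type is still dependent. An assumed triggering shape, for illustration only:

    // Inside a template, the destroyed type of 'delete p' may not be
    // resolvable yet, so getDestroyedType() can come back null.
    template <typename T> void destroy(T *p) { delete p; }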
diff --git a/clang/lib/Serialization/ASTCommon.cpp b/clang/lib/Serialization/ASTCommon.cpp
index bf583b02f96b..fec6dd5cf17d 100644
--- a/clang/lib/Serialization/ASTCommon.cpp
+++ b/clang/lib/Serialization/ASTCommon.cpp
@@ -237,6 +237,11 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
ID = PREDEF_TYPE_##Id##_ID; \
break;
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case BuiltinType::Id: \
+ ID = PREDEF_TYPE_##Id##_ID; \
+ break;
+#include "clang/Basic/PPCTypes.def"
case BuiltinType::BuiltinFn:
ID = PREDEF_TYPE_BUILTIN_FN;
break;
@@ -378,6 +383,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::Field:
case Decl::MSProperty:
case Decl::MSGuid:
+ case Decl::TemplateParamObject:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
case Decl::NonTypeTemplateParm:
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index 4a1a995204e5..1f68f6bc3e38 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -390,8 +390,10 @@ static bool checkTargetOptions(const TargetOptions &TargetOpts,
// We can tolerate different CPUs in many cases, notably when one CPU
// supports a strict superset of another. When allowing compatible
// differences skip this check.
- if (!AllowCompatibleDifferences)
+ if (!AllowCompatibleDifferences) {
CHECK_TARGET_OPT(CPU, "target CPU");
+ CHECK_TARGET_OPT(TuneCPU, "tune CPU");
+ }
#undef CHECK_TARGET_OPT
@@ -908,9 +910,8 @@ ASTIdentifierLookupTraitBase::ReadKey(const unsigned char* d, unsigned n) {
/// Whether the given identifier is "interesting".
static bool isInterestingIdentifier(ASTReader &Reader, IdentifierInfo &II,
bool IsModule) {
- return II.hadMacroDefinition() ||
- II.isPoisoned() ||
- (IsModule ? II.hasRevertedBuiltin() : II.getObjCOrBuiltinID()) ||
+ return II.hadMacroDefinition() || II.isPoisoned() ||
+ (!IsModule && II.getObjCOrBuiltinID()) ||
II.hasRevertedTokenIDToIdentifier() ||
(!(IsModule && Reader.getPreprocessor().getLangOpts().CPlusPlus) &&
II.getFETokenInfo());
@@ -970,7 +971,6 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
unsigned Bits = endian::readNext<uint16_t, little, unaligned>(d);
bool CPlusPlusOperatorKeyword = readBit(Bits);
bool HasRevertedTokenIDToIdentifier = readBit(Bits);
- bool HasRevertedBuiltin = readBit(Bits);
bool Poisoned = readBit(Bits);
bool ExtensionToken = readBit(Bits);
bool HadMacroDefinition = readBit(Bits);
@@ -984,12 +984,6 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
II->revertTokenIDToIdentifier();
if (!F.isModule())
II->setObjCOrBuiltinID(ObjCOrBuiltinID);
- else if (HasRevertedBuiltin && II->getBuiltinID()) {
- II->revertBuiltin();
- assert((II->hasRevertedBuiltin() ||
- II->getObjCOrBuiltinID() == ObjCOrBuiltinID) &&
- "Incorrect ObjC keyword or builtin ID");
- }
assert(II->isExtensionToken() == ExtensionToken &&
"Incorrect extension token flag");
(void)ExtensionToken;
@@ -1252,12 +1246,6 @@ void ASTReader::Error(unsigned DiagID, StringRef Arg1, StringRef Arg2,
Diag(DiagID) << Arg1 << Arg2 << Arg3;
}
-void ASTReader::Error(unsigned DiagID, StringRef Arg1, StringRef Arg2,
- unsigned Select) const {
- if (!Diags.isDiagnosticInFlight())
- Diag(DiagID) << Arg1 << Arg2 << Select;
-}
-
void ASTReader::Error(llvm::Error &&Err) const {
Error(toString(std::move(Err)));
}
@@ -1511,7 +1499,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
// we will also try to fail gracefully by setting up the SLocEntry.
unsigned InputID = Record[4];
InputFile IF = getInputFile(*F, InputID);
- const FileEntry *File = IF.getFile();
+ Optional<FileEntryRef> File = IF.getFile();
bool OverriddenBuffer = IF.isOverridden();
// Note that we only check if a File was returned. If it was out-of-date
@@ -1527,9 +1515,8 @@ bool ASTReader::ReadSLocEntry(int ID) {
}
SrcMgr::CharacteristicKind
FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
- // FIXME: The FileID should be created from the FileEntryRef.
- FileID FID = SourceMgr.createFileID(File, IncludeLoc, FileCharacter,
- ID, BaseOffset + Record[0]);
+ FileID FID = SourceMgr.createFileID(*File, IncludeLoc, FileCharacter, ID,
+ BaseOffset + Record[0]);
SrcMgr::FileInfo &FileInfo =
const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile());
FileInfo.NumCreatedFIDs = Record[5];
@@ -1544,15 +1531,15 @@ bool ASTReader::ReadSLocEntry(int ID) {
NumFileDecls));
}
- const SrcMgr::ContentCache *ContentCache
- = SourceMgr.getOrCreateContentCache(File, isSystem(FileCharacter));
- if (OverriddenBuffer && !ContentCache->BufferOverridden &&
- ContentCache->ContentsEntry == ContentCache->OrigEntry &&
- !ContentCache->getRawBuffer()) {
+ const SrcMgr::ContentCache &ContentCache =
+ SourceMgr.getOrCreateContentCache(*File, isSystem(FileCharacter));
+ if (OverriddenBuffer && !ContentCache.BufferOverridden &&
+ ContentCache.ContentsEntry == ContentCache.OrigEntry &&
+ !ContentCache.getBufferIfLoaded()) {
auto Buffer = ReadBuffer(SLocEntryCursor, File->getName());
if (!Buffer)
return true;
- SourceMgr.overrideFileContents(File, std::move(Buffer));
+ SourceMgr.overrideFileContents(*File, std::move(Buffer));
}
break;
@@ -1927,7 +1914,8 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
// FIXME: This is not always the right filename-as-written, but we're not
// going to use this information to rebuild the module, so it doesn't make
// a lot of difference.
- Module::Header H = {std::string(key.Filename), *FileMgr.getFile(Filename)};
+ Module::Header H = {std::string(key.Filename),
+ *FileMgr.getOptionalFileRef(Filename)};
ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
HFI.isModuleHeader |= !(HeaderRole & ModuleMap::TextualHeader);
}
@@ -2222,6 +2210,29 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
PP.setLoadedMacroDirective(II, Earliest, Latest);
}
+bool ASTReader::shouldDisableValidationForFile(
+ const serialization::ModuleFile &M) const {
+ if (DisableValidationKind == DisableValidationForModuleKind::None)
+ return false;
+
+  // If a PCH is loaded and validation is disabled for PCH, then disable
+  // validation for the PCH and the modules it loads.
+ ModuleKind K = CurrentDeserializingModuleKind.getValueOr(M.Kind);
+
+ switch (K) {
+ case MK_MainFile:
+ case MK_Preamble:
+ case MK_PCH:
+ return bool(DisableValidationKind & DisableValidationForModuleKind::PCH);
+ case MK_ImplicitModule:
+ case MK_ExplicitModule:
+ case MK_PrebuiltModule:
+ return bool(DisableValidationKind & DisableValidationForModuleKind::Module);
+ }
+
+ return false;
+}
+
ASTReader::InputFileInfo
ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
// Go find this input file.
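DisableValidationForModuleKind is used as a bitmask, and the helper above selects the PCH or Module bit based on the kind of the top-level file being deserialized. A stand-in sketch of the flag test (enum and names illustrative, not clang's real declaration):

    enum class DisableValidation : unsigned { None = 0, PCH = 1, Module = 2 };
    constexpr unsigned operator&(DisableValidation A, DisableValidation B) {
      return unsigned(A) & unsigned(B);
    }
    static bool disabledForPCH(DisableValidation K) {
      return (K & DisableValidation::PCH) != 0;
    }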
@@ -2308,27 +2319,25 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
StringRef Filename = FI.Filename;
uint64_t StoredContentHash = FI.ContentHash;
- const FileEntry *File = nullptr;
- if (auto FE = FileMgr.getFile(Filename, /*OpenFile=*/false))
- File = *FE;
+ OptionalFileEntryRefDegradesToFileEntryPtr File =
+ expectedToOptional(FileMgr.getFileRef(Filename, /*OpenFile=*/false));
// If we didn't find the file, resolve it relative to the
// original directory from which this AST file was created.
- if (File == nullptr && !F.OriginalDir.empty() && !F.BaseDirectory.empty() &&
+ if (!File && !F.OriginalDir.empty() && !F.BaseDirectory.empty() &&
F.OriginalDir != F.BaseDirectory) {
std::string Resolved = resolveFileRelativeToOriginalDir(
std::string(Filename), F.OriginalDir, F.BaseDirectory);
if (!Resolved.empty())
- if (auto FE = FileMgr.getFile(Resolved))
- File = *FE;
+ File = expectedToOptional(FileMgr.getFileRef(Resolved));
}
// For an overridden file, create a virtual file with the stored
// size/timestamp.
- if ((Overridden || Transient) && File == nullptr)
- File = FileMgr.getVirtualFile(Filename, StoredSize, StoredTime);
+ if ((Overridden || Transient) && !File)
+ File = FileMgr.getVirtualFileRef(Filename, StoredSize, StoredTime);
- if (File == nullptr) {
+ if (!File) {
if (Complain) {
std::string ErrorStr = "could not find file '";
ErrorStr += Filename;
@@ -2370,7 +2379,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
auto HasInputFileChanged = [&]() {
if (StoredSize != File->getSize())
return ModificationType::Size;
- if (!DisableValidation && StoredTime &&
+ if (!shouldDisableValidationForFile(F) && StoredTime &&
StoredTime != File->getModificationTime()) {
// In case the modification time changes but not the content,
// accept the cached file as legit.
@@ -2401,7 +2410,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
auto FileChange = HasInputFileChanged();
// For an overridden file, there is nothing to validate.
if (!Overridden && FileChange != ModificationType::None) {
- if (Complain) {
+ if (Complain && !Diags.isDiagnosticInFlight()) {
// Build a list of the PCH imports that got us here (in reverse).
SmallVector<ModuleFile *, 4> ImportStack(1, &F);
while (!ImportStack.back()->ImportedBy.empty())
@@ -2409,20 +2418,12 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// The top-level PCH is stale.
StringRef TopLevelPCHName(ImportStack.back()->FileName);
- unsigned DiagnosticKind =
- moduleKindForDiagnostic(ImportStack.back()->Kind);
- if (DiagnosticKind == 0)
- Error(diag::err_fe_pch_file_modified, Filename, TopLevelPCHName,
- (unsigned)FileChange);
- else if (DiagnosticKind == 1)
- Error(diag::err_fe_module_file_modified, Filename, TopLevelPCHName,
- (unsigned)FileChange);
- else
- Error(diag::err_fe_ast_file_modified, Filename, TopLevelPCHName,
- (unsigned)FileChange);
+ Diag(diag::err_fe_ast_file_modified)
+ << Filename << moduleKindForDiagnostic(ImportStack.back()->Kind)
+ << TopLevelPCHName << FileChange;
// Print the import stack.
- if (ImportStack.size() > 1 && !Diags.isDiagnosticInFlight()) {
+ if (ImportStack.size() > 1) {
Diag(diag::note_pch_required_by)
<< Filename << ImportStack[0]->FileName;
for (unsigned I = 1; I < ImportStack.size(); ++I)
@@ -2430,8 +2431,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
<< ImportStack[I-1]->FileName << ImportStack[I]->FileName;
}
- if (!Diags.isDiagnosticInFlight())
- Diag(diag::note_pch_rebuild_required) << TopLevelPCHName;
+ Diag(diag::note_pch_rebuild_required) << TopLevelPCHName;
}
IsOutOfDate = true;
@@ -2439,7 +2439,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// FIXME: If the file is overridden and we've already opened it,
// issue an error (or split it into a separate FileEntry).
- InputFile IF = InputFile(File, Overridden || Transient, IsOutOfDate);
+ InputFile IF = InputFile(*File, Overridden || Transient, IsOutOfDate);
// Note that we've loaded this input file.
F.InputFilesLoaded[ID-1] = IF;
@@ -2595,6 +2595,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return Success;
};
+ bool DisableValidation = shouldDisableValidationForFile(F);
+
// Read all of the records and blocks in the control block.
RecordData Record;
unsigned NumInputs = 0;
@@ -2747,7 +2749,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return VersionMismatch;
}
- bool hasErrors = Record[7];
+ bool hasErrors = Record[6];
if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
Diag(diag::err_pch_with_compiler_errors);
return HadErrors;
@@ -2765,8 +2767,6 @@ ASTReader::ReadControlBlock(ModuleFile &F,
F.HasTimestamps = Record[5];
- F.PCHHasObjectFile = Record[6];
-
const std::string &CurBranch = getClangFullRepositoryVersion();
StringRef ASTBranch = Blob;
if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
@@ -2895,7 +2895,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
// If we're implicitly loading a module, the base directory can't
// change between the build and use.
// Don't emit module relocation error if we have -fno-validate-pch
- if (!PP.getPreprocessorOpts().DisablePCHValidation &&
+ if (!bool(PP.getPreprocessorOpts().DisablePCHOrModuleValidation &
+ DisableValidationForModuleKind::Module) &&
F.Kind != MK_ExplicitModule && F.Kind != MK_PrebuiltModule) {
auto BuildDir = PP.getFileManager().getDirectory(Blob);
if (!BuildDir || *BuildDir != M->Directory) {
@@ -3607,11 +3608,12 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case OPENCL_EXTENSIONS:
for (unsigned I = 0, E = Record.size(); I != E; ) {
auto Name = ReadString(Record, I);
- auto &Opt = OpenCLExtensions.OptMap[Name];
- Opt.Supported = Record[I++] != 0;
- Opt.Enabled = Record[I++] != 0;
- Opt.Avail = Record[I++];
- Opt.Core = Record[I++];
+ auto &OptInfo = OpenCLExtensions.OptMap[Name];
+ OptInfo.Supported = Record[I++] != 0;
+ OptInfo.Enabled = Record[I++] != 0;
+ OptInfo.Avail = Record[I++];
+ OptInfo.Core = Record[I++];
+ OptInfo.Opt = Record[I++];
}
break;
@@ -3722,7 +3724,9 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case LATE_PARSED_TEMPLATE:
- LateParsedTemplates.append(Record.begin(), Record.end());
+ LateParsedTemplates.emplace_back(
+ std::piecewise_construct, std::forward_as_tuple(&F),
+ std::forward_as_tuple(Record.begin(), Record.end()));
break;
case OPTIMIZE_PRAGMA_OPTIONS:
@@ -3764,25 +3768,25 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
ForceCUDAHostDeviceDepth = Record[0];
break;
- case PACK_PRAGMA_OPTIONS: {
+ case ALIGN_PACK_PRAGMA_OPTIONS: {
if (Record.size() < 3) {
Error("invalid pragma pack record");
return Failure;
}
- PragmaPackCurrentValue = Record[0];
- PragmaPackCurrentLocation = ReadSourceLocation(F, Record[1]);
+ PragmaAlignPackCurrentValue = ReadAlignPackInfo(Record[0]);
+ PragmaAlignPackCurrentLocation = ReadSourceLocation(F, Record[1]);
unsigned NumStackEntries = Record[2];
unsigned Idx = 3;
// Reset the stack when importing a new module.
- PragmaPackStack.clear();
+ PragmaAlignPackStack.clear();
for (unsigned I = 0; I < NumStackEntries; ++I) {
- PragmaPackStackEntry Entry;
- Entry.Value = Record[Idx++];
+ PragmaAlignPackStackEntry Entry;
+ Entry.Value = ReadAlignPackInfo(Record[Idx++]);
Entry.Location = ReadSourceLocation(F, Record[Idx++]);
Entry.PushLocation = ReadSourceLocation(F, Record[Idx++]);
- PragmaPackStrings.push_back(ReadString(Record, Idx));
- Entry.SlotLabel = PragmaPackStrings.back();
- PragmaPackStack.push_back(Entry);
+ PragmaAlignPackStrings.push_back(ReadString(Record, Idx));
+ Entry.SlotLabel = PragmaAlignPackStrings.back();
+ PragmaAlignPackStack.push_back(Entry);
}
break;
}
@@ -3792,7 +3796,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error("invalid pragma pack record");
return Failure;
}
- FpPragmaCurrentValue = Record[0];
+ FpPragmaCurrentValue = FPOptionsOverride::getFromOpaqueInt(Record[0]);
FpPragmaCurrentLocation = ReadSourceLocation(F, Record[1]);
unsigned NumStackEntries = Record[2];
unsigned Idx = 3;
@@ -3800,7 +3804,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
FpPragmaStack.clear();
for (unsigned I = 0; I < NumStackEntries; ++I) {
FpPragmaStackEntry Entry;
- Entry.Value = Record[Idx++];
+ Entry.Value = FPOptionsOverride::getFromOpaqueInt(Record[Idx++]);
Entry.Location = ReadSourceLocation(F, Record[Idx++]);
Entry.PushLocation = ReadSourceLocation(F, Record[Idx++]);
FpPragmaStrings.push_back(ReadString(Record, Idx));
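The reader now rebuilds FP pragma state with FPOptionsOverride::getFromOpaqueInt, the inverse of the getAsOpaqueInt calls dropped from TreeTransform above. A round-trip sketch, assuming the two methods remain exact inverses as the serialization format requires:

    #include <cassert>
    // FPOptionsOverride comes from clang/Basic/LangOptions.h.
    static void roundTripFPState(FPOptionsOverride Saved) {
      auto Bits = Saved.getAsOpaqueInt();   // what the writer records
      FPOptionsOverride Restored = FPOptionsOverride::getFromOpaqueInt(Bits);
      assert(Restored.getAsOpaqueInt() == Bits);
    }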
@@ -3925,9 +3929,11 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
const FileEntry *ModMap = M ? Map.getModuleMapFileForUniquing(M) : nullptr;
// Don't emit module relocation error if we have -fno-validate-pch
- if (!PP.getPreprocessorOpts().DisablePCHValidation && !ModMap) {
+ if (!bool(PP.getPreprocessorOpts().DisablePCHOrModuleValidation &
+ DisableValidationForModuleKind::Module) &&
+ !ModMap) {
if ((ClientLoadCapabilities & ARR_OutOfDate) == 0) {
- if (auto *ASTFE = M ? M->getASTFile() : nullptr) {
+ if (auto ASTFE = M ? M->getASTFile() : None) {
// This module was defined by an imported (explicit) module.
Diag(diag::err_module_file_conflict) << F.ModuleName << F.FileName
<< ASTFE->getName();
@@ -3948,7 +3954,7 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
return OutOfDate;
}
- assert(M->Name == F.ModuleName && "found module with different name");
+ assert(M && M->Name == F.ModuleName && "found module with different name");
// Check the primary module map file.
auto StoredModMap = FileMgr.getFile(F.ModuleMapPath);
@@ -4211,6 +4217,8 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
SmallVectorImpl<ImportedSubmodule> *Imported) {
llvm::SaveAndRestore<SourceLocation>
SetCurImportLocRAII(CurrentImportLoc, ImportLoc);
+ llvm::SaveAndRestore<Optional<ModuleKind>> SetCurModuleKindRAII(
+ CurrentDeserializingModuleKind, Type);
// Defer any pending actions until we get to the end of reading the AST file.
Deserializing AnASTFile(this);
@@ -4518,9 +4526,9 @@ ASTReader::ReadASTCore(StringRef FileName,
return Missing;
// Otherwise, return an error.
- Diag(diag::err_module_file_not_found) << moduleKindForDiagnostic(Type)
- << FileName << !ErrorStr.empty()
- << ErrorStr;
+ Diag(diag::err_ast_file_not_found)
+ << moduleKindForDiagnostic(Type) << FileName << !ErrorStr.empty()
+ << ErrorStr;
return Failure;
case ModuleManager::OutOfDate:
@@ -4530,9 +4538,9 @@ ASTReader::ReadASTCore(StringRef FileName,
return OutOfDate;
// Otherwise, return an error.
- Diag(diag::err_module_file_out_of_date) << moduleKindForDiagnostic(Type)
- << FileName << !ErrorStr.empty()
- << ErrorStr;
+ Diag(diag::err_ast_file_out_of_date)
+ << moduleKindForDiagnostic(Type) << FileName << !ErrorStr.empty()
+ << ErrorStr;
return Failure;
}
@@ -4553,7 +4561,7 @@ ASTReader::ReadASTCore(StringRef FileName,
// Sniff for the signature.
if (llvm::Error Err = doesntStartWithASTFileMagic(Stream)) {
- Diag(diag::err_module_file_invalid)
+ Diag(diag::err_ast_file_invalid)
<< moduleKindForDiagnostic(Type) << FileName << std::move(Err);
return Failure;
}
@@ -4645,6 +4653,7 @@ ASTReader::readUnhashedControlBlock(ModuleFile &F, bool WasImportedBy,
PP.getHeaderSearchInfo().getHeaderSearchOpts();
bool AllowCompatibleConfigurationMismatch =
F.Kind == MK_ExplicitModule || F.Kind == MK_PrebuiltModule;
+ bool DisableValidation = shouldDisableValidationForFile(F);
ASTReadResult Result = readUnhashedControlBlockImpl(
&F, F.Data, ClientLoadCapabilities, AllowCompatibleConfigurationMismatch,
@@ -4989,10 +4998,10 @@ void ASTReader::InitializeContext() {
/*ImportLoc=*/Import.ImportLoc);
if (Import.ImportLoc.isValid())
PP.makeModuleVisible(Imported, Import.ImportLoc);
- // FIXME: should we tell Sema to make the module visible too?
+ // This updates visibility for the Preprocessor only. For Sema, which can
+ // be nullptr here, we do the same later, in UpdateSema().
}
}
- ImportedModules.clear();
}
void ASTReader::finalizeForWriting() {
@@ -5536,7 +5545,8 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
if (!ParentModule) {
if (const FileEntry *CurFile = CurrentModule->getASTFile()) {
// Don't emit module relocation error if we have -fno-validate-pch
- if (!PP.getPreprocessorOpts().DisablePCHValidation &&
+ if (!bool(PP.getPreprocessorOpts().DisablePCHOrModuleValidation &
+ DisableValidationForModuleKind::Module) &&
CurFile != F.File) {
Error(diag::err_module_file_conflict,
CurrentModule->getTopLevelModuleName(), CurFile->getName(),
@@ -5587,7 +5597,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SUBMODULE_UMBRELLA_HEADER: {
std::string Filename = std::string(Blob);
ResolveImportedPath(F, Filename);
- if (auto Umbrella = PP.getFileManager().getFile(Filename)) {
+ if (auto Umbrella = PP.getFileManager().getOptionalFileRef(Filename)) {
if (!CurrentModule->getUmbrellaHeader())
ModMap.setUmbrellaHeader(CurrentModule, *Umbrella, Blob);
else if (CurrentModule->getUmbrellaHeader().Entry != *Umbrella) {
@@ -5620,7 +5630,8 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SUBMODULE_UMBRELLA_DIR: {
std::string Dirname = std::string(Blob);
ResolveImportedPath(F, Dirname);
- if (auto Umbrella = PP.getFileManager().getDirectory(Dirname)) {
+ if (auto Umbrella =
+ PP.getFileManager().getOptionalDirectoryRef(Dirname)) {
if (!CurrentModule->getUmbrellaDir())
ModMap.setUmbrellaDir(CurrentModule, *Umbrella, Blob);
else if (CurrentModule->getUmbrellaDir().Entry != *Umbrella) {
@@ -5781,6 +5792,7 @@ bool ASTReader::ParseTargetOptions(const RecordData &Record, bool Complain,
TargetOptions TargetOpts;
TargetOpts.Triple = ReadString(Record, Idx);
TargetOpts.CPU = ReadString(Record, Idx);
+ TargetOpts.TuneCPU = ReadString(Record, Idx);
TargetOpts.ABI = ReadString(Record, Idx);
for (unsigned N = Record[Idx++]; N; --N) {
TargetOpts.FeaturesAsWritten.push_back(ReadString(Record, Idx));
@@ -5849,6 +5861,7 @@ bool ASTReader::ParseHeaderSearchOptions(const RecordData &Record,
HSOpts.DisableModuleHash = Record[Idx++];
HSOpts.ImplicitModuleMaps = Record[Idx++];
HSOpts.ModuleMapFileHomeIsCwd = Record[Idx++];
+ HSOpts.EnablePrebuiltImplicitModules = Record[Idx++];
HSOpts.UseBuiltinIncludes = Record[Idx++];
HSOpts.UseStandardSystemIncludes = Record[Idx++];
HSOpts.UseStandardCXXIncludes = Record[Idx++];
@@ -6476,8 +6489,8 @@ void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
TL.setBuiltinLoc(readSourceLocation());
if (TL.needsExtraLocalData()) {
TL.setWrittenTypeSpec(static_cast<DeclSpec::TST>(Reader.readInt()));
- TL.setWrittenSignSpec(static_cast<DeclSpec::TSS>(Reader.readInt()));
- TL.setWrittenWidthSpec(static_cast<DeclSpec::TSW>(Reader.readInt()));
+ TL.setWrittenSignSpec(static_cast<TypeSpecifierSign>(Reader.readInt()));
+ TL.setWrittenWidthSpec(static_cast<TypeSpecifierWidth>(Reader.readInt()));
TL.setModeAttr(Reader.readInt());
}
}
@@ -7044,6 +7057,11 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.SingletonId; \
break;
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) \
+ case PREDEF_TYPE_##Id##_ID: \
+ T = Context.Id##Ty; \
+ break;
+#include "clang/Basic/PPCTypes.def"
}
assert(!T.isNull() && "Unknown predefined type");
@@ -7100,15 +7118,15 @@ ASTRecordReader::readTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind) {
NestedNameSpecifierLoc QualifierLoc =
readNestedNameSpecifierLoc();
SourceLocation TemplateNameLoc = readSourceLocation();
- return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
- SourceLocation());
+ return TemplateArgumentLocInfo(getASTContext(), QualifierLoc,
+ TemplateNameLoc, SourceLocation());
}
case TemplateArgument::TemplateExpansion: {
NestedNameSpecifierLoc QualifierLoc = readNestedNameSpecifierLoc();
SourceLocation TemplateNameLoc = readSourceLocation();
SourceLocation EllipsisLoc = readSourceLocation();
- return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
- EllipsisLoc);
+ return TemplateArgumentLocInfo(getASTContext(), QualifierLoc,
+ TemplateNameLoc, EllipsisLoc);
}
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -7844,12 +7862,13 @@ void ASTReader::InitializeSema(Sema &S) {
// FIXME: What happens if these are changed by a module import?
if (!FPPragmaOptions.empty()) {
assert(FPPragmaOptions.size() == 1 && "Wrong number of FP_PRAGMA_OPTIONS");
- FPOptionsOverride NewOverrides(FPPragmaOptions[0]);
+ FPOptionsOverride NewOverrides =
+ FPOptionsOverride::getFromOpaqueInt(FPPragmaOptions[0]);
SemaObj->CurFPFeatures =
NewOverrides.applyOverrides(SemaObj->getLangOpts());
}
- SemaObj->OpenCLFeatures.copy(OpenCLExtensions);
+ SemaObj->OpenCLFeatures = OpenCLExtensions;
SemaObj->OpenCLTypeExtMap = OpenCLTypeExtMap;
SemaObj->OpenCLDeclExtMap = OpenCLDeclExtMap;
@@ -7888,32 +7907,37 @@ void ASTReader::UpdateSema() {
}
SemaObj->ForceCUDAHostDeviceDepth = ForceCUDAHostDeviceDepth;
- if (PragmaPackCurrentValue) {
+ if (PragmaAlignPackCurrentValue) {
// The bottom of the stack might have a default value. It must be adjusted
// to the current value to ensure that the packing state is preserved after
// popping entries that were included/imported from a PCH/module.
bool DropFirst = false;
- if (!PragmaPackStack.empty() &&
- PragmaPackStack.front().Location.isInvalid()) {
- assert(PragmaPackStack.front().Value == SemaObj->PackStack.DefaultValue &&
+ if (!PragmaAlignPackStack.empty() &&
+ PragmaAlignPackStack.front().Location.isInvalid()) {
+ assert(PragmaAlignPackStack.front().Value ==
+ SemaObj->AlignPackStack.DefaultValue &&
"Expected a default alignment value");
- SemaObj->PackStack.Stack.emplace_back(
- PragmaPackStack.front().SlotLabel, SemaObj->PackStack.CurrentValue,
- SemaObj->PackStack.CurrentPragmaLocation,
- PragmaPackStack.front().PushLocation);
+ SemaObj->AlignPackStack.Stack.emplace_back(
+ PragmaAlignPackStack.front().SlotLabel,
+ SemaObj->AlignPackStack.CurrentValue,
+ SemaObj->AlignPackStack.CurrentPragmaLocation,
+ PragmaAlignPackStack.front().PushLocation);
DropFirst = true;
}
- for (const auto &Entry :
- llvm::makeArrayRef(PragmaPackStack).drop_front(DropFirst ? 1 : 0))
- SemaObj->PackStack.Stack.emplace_back(Entry.SlotLabel, Entry.Value,
- Entry.Location, Entry.PushLocation);
- if (PragmaPackCurrentLocation.isInvalid()) {
- assert(*PragmaPackCurrentValue == SemaObj->PackStack.DefaultValue &&
- "Expected a default alignment value");
+ for (const auto &Entry : llvm::makeArrayRef(PragmaAlignPackStack)
+ .drop_front(DropFirst ? 1 : 0)) {
+ SemaObj->AlignPackStack.Stack.emplace_back(
+ Entry.SlotLabel, Entry.Value, Entry.Location, Entry.PushLocation);
+ }
+ if (PragmaAlignPackCurrentLocation.isInvalid()) {
+ assert(*PragmaAlignPackCurrentValue ==
+ SemaObj->AlignPackStack.DefaultValue &&
+ "Expected a default align and pack value");
// Keep the current values.
} else {
- SemaObj->PackStack.CurrentValue = *PragmaPackCurrentValue;
- SemaObj->PackStack.CurrentPragmaLocation = PragmaPackCurrentLocation;
+ SemaObj->AlignPackStack.CurrentValue = *PragmaAlignPackCurrentValue;
+ SemaObj->AlignPackStack.CurrentPragmaLocation =
+ PragmaAlignPackCurrentLocation;
}
}
if (FpPragmaCurrentValue) {
@@ -7944,6 +7968,15 @@ void ASTReader::UpdateSema() {
SemaObj->FpPragmaStack.CurrentPragmaLocation = FpPragmaCurrentLocation;
}
}
+
+ // For non-modular AST files, restore visibility of modules.
+ for (auto &Import : ImportedModules) {
+ if (Import.ImportLoc.isInvalid())
+ continue;
+ if (Module *Imported = getSubmodule(Import.ID)) {
+ SemaObj->makeModuleVisible(Imported, Import.ImportLoc);
+ }
+ }
}
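
The loop added to UpdateSema() replays imports that were recorded while Sema did not yet exist. A reduced sketch of this record-now-replay-later pattern, with hypothetical types:

    #include <cassert>
    #include <vector>

    // Hypothetical stand-ins: a consumer that may not exist during
    // deserialization, and import events recorded while it is absent.
    struct Consumer {
      int LastVisible = 0;
      void makeVisible(int ModuleID) { LastVisible = ModuleID; }
    };

    struct Reader {
      std::vector<int> PendingImports; // recorded with no consumer attached
      void recordImport(int ModuleID) { PendingImports.push_back(ModuleID); }

      // Once the consumer exists (UpdateSema() in the diff), replay every
      // recorded event so no visibility update is lost.
      void attach(Consumer &C) {
        for (int ID : PendingImports)
          C.makeVisible(ID);
      }
    };

    int main() {
      Reader R;
      R.recordImport(42); // consumer not constructed yet
      Consumer Sema;
      R.attach(Sema);
      assert(Sema.LastVisible == 42);
    }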
IdentifierInfo *ASTReader::get(StringRef Name) {
@@ -8378,25 +8411,28 @@ void ASTReader::ReadPendingInstantiations(
void ASTReader::ReadLateParsedTemplates(
llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>>
&LPTMap) {
- for (unsigned Idx = 0, N = LateParsedTemplates.size(); Idx < N;
- /* In loop */) {
- FunctionDecl *FD = cast<FunctionDecl>(GetDecl(LateParsedTemplates[Idx++]));
+ for (auto &LPT : LateParsedTemplates) {
+ ModuleFile *FMod = LPT.first;
+ RecordDataImpl &LateParsed = LPT.second;
+ for (unsigned Idx = 0, N = LateParsed.size(); Idx < N;
+ /* In loop */) {
+ FunctionDecl *FD =
+ cast<FunctionDecl>(GetLocalDecl(*FMod, LateParsed[Idx++]));
- auto LT = std::make_unique<LateParsedTemplate>();
- LT->D = GetDecl(LateParsedTemplates[Idx++]);
+ auto LT = std::make_unique<LateParsedTemplate>();
+ LT->D = GetLocalDecl(*FMod, LateParsed[Idx++]);
- ModuleFile *F = getOwningModuleFile(LT->D);
- assert(F && "No module");
+ ModuleFile *F = getOwningModuleFile(LT->D);
+ assert(F && "No module");
- unsigned TokN = LateParsedTemplates[Idx++];
- LT->Toks.reserve(TokN);
- for (unsigned T = 0; T < TokN; ++T)
- LT->Toks.push_back(ReadToken(*F, LateParsedTemplates, Idx));
+ unsigned TokN = LateParsed[Idx++];
+ LT->Toks.reserve(TokN);
+ for (unsigned T = 0; T < TokN; ++T)
+ LT->Toks.push_back(ReadToken(*F, LateParsed, Idx));
- LPTMap.insert(std::make_pair(FD, std::move(LT)));
+ LPTMap.insert(std::make_pair(FD, std::move(LT)));
+ }
}
-
- LateParsedTemplates.clear();
}
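
The rewritten loop above walks one record per module file, and each entry carries decl IDs followed by a length-prefixed token list. A sketch of cursor-style decoding for such a record, assuming a simplified layout of [FnID, DeclID, TokN, Tok...]:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct LateParsed {
      uint64_t FnID = 0, DeclID = 0;
      std::vector<uint64_t> Toks;
    };

    // Decode one entry, advancing Idx past everything consumed; the real
    // serialization stores richer token data, but the cursor discipline is
    // the same.
    static LateParsed decode(const std::vector<uint64_t> &Record,
                             unsigned &Idx) {
      LateParsed LP;
      LP.FnID = Record[Idx++];
      LP.DeclID = Record[Idx++];
      uint64_t TokN = Record[Idx++]; // length prefix
      LP.Toks.reserve(TokN);
      for (uint64_t T = 0; T < TokN; ++T)
        LP.Toks.push_back(Record[Idx++]);
      return LP;
    }

    int main() {
      std::vector<uint64_t> Record = {7, 9, 3, 100, 101, 102};
      unsigned Idx = 0;
      LateParsed LP = decode(Record, Idx);
      assert(LP.Toks.size() == 3 && Idx == Record.size());
    }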
void ASTReader::LoadSelector(Selector Sel) {
@@ -8590,11 +8626,6 @@ Module *ASTReader::getModule(unsigned ID) {
return getSubmodule(ID);
}
-bool ASTReader::DeclIsFromPCHWithObjectFile(const Decl *D) {
- ModuleFile *MF = getOwningModuleFile(D);
- return MF && MF->PCHHasObjectFile;
-}
-
ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &F, unsigned ID) {
if (ID & 1) {
// It's a module, look it up by submodule ID.
@@ -8947,65 +8978,6 @@ ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
return SourceRange(beg, end);
}
-static FixedPointSemantics
-ReadFixedPointSemantics(const SmallVectorImpl<uint64_t> &Record,
- unsigned &Idx) {
- unsigned Width = Record[Idx++];
- unsigned Scale = Record[Idx++];
- uint64_t Tmp = Record[Idx++];
- bool IsSigned = Tmp & 0x1;
- bool IsSaturated = Tmp & 0x2;
- bool HasUnsignedPadding = Tmp & 0x4;
- return FixedPointSemantics(Width, Scale, IsSigned, IsSaturated,
- HasUnsignedPadding);
-}
-
-static const llvm::fltSemantics &
-readAPFloatSemantics(ASTRecordReader &reader) {
- return llvm::APFloatBase::EnumToSemantics(
- static_cast<llvm::APFloatBase::Semantics>(reader.readInt()));
-}
-
-APValue ASTRecordReader::readAPValue() {
- unsigned Kind = readInt();
- switch ((APValue::ValueKind) Kind) {
- case APValue::None:
- return APValue();
- case APValue::Indeterminate:
- return APValue::IndeterminateValue();
- case APValue::Int:
- return APValue(readAPSInt());
- case APValue::Float: {
- const llvm::fltSemantics &FloatSema = readAPFloatSemantics(*this);
- return APValue(readAPFloat(FloatSema));
- }
- case APValue::FixedPoint: {
- FixedPointSemantics FPSema = ReadFixedPointSemantics(Record, Idx);
- return APValue(APFixedPoint(readAPInt(), FPSema));
- }
- case APValue::ComplexInt: {
- llvm::APSInt First = readAPSInt();
- return APValue(std::move(First), readAPSInt());
- }
- case APValue::ComplexFloat: {
- const llvm::fltSemantics &FloatSema1 = readAPFloatSemantics(*this);
- llvm::APFloat First = readAPFloat(FloatSema1);
- const llvm::fltSemantics &FloatSema2 = readAPFloatSemantics(*this);
- return APValue(std::move(First), readAPFloat(FloatSema2));
- }
- case APValue::LValue:
- case APValue::Vector:
- case APValue::Array:
- case APValue::Struct:
- case APValue::Union:
- case APValue::MemberPointer:
- case APValue::AddrLabelDiff:
- // TODO : Handle all these APValue::ValueKind.
- return APValue();
- }
- llvm_unreachable("Invalid APValue::ValueKind");
-}
-
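
The deleted ReadFixedPointSemantics helper unpacked three booleans from one record integer (bits 0x1, 0x2, 0x4). A standalone sketch of that pack/unpack scheme:

    #include <cassert>
    #include <cstdint>

    struct Flags {
      bool IsSigned, IsSaturated, HasUnsignedPadding;
    };

    // Pack the three booleans into the low bits of one integer, mirroring
    // the layout the removed helper decoded.
    static uint64_t pack(Flags F) {
      return (F.IsSigned ? 0x1 : 0) | (F.IsSaturated ? 0x2 : 0) |
             (F.HasUnsignedPadding ? 0x4 : 0);
    }

    static Flags unpack(uint64_t Tmp) {
      return Flags{(Tmp & 0x1) != 0, (Tmp & 0x2) != 0, (Tmp & 0x4) != 0};
    }

    int main() {
      Flags F{true, false, true};
      Flags G = unpack(pack(F));
      assert(G.IsSigned == F.IsSigned && G.IsSaturated == F.IsSaturated &&
             G.HasUnsignedPadding == F.HasUnsignedPadding);
    }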
/// Read a floating-point value
llvm::APFloat ASTRecordReader::readAPFloat(const llvm::fltSemantics &Sem) {
return llvm::APFloat(Sem, readAPInt());
@@ -9180,7 +9152,7 @@ void ASTReader::visitTopLevelModuleMaps(
InputFileInfo IFI = readInputFileInfo(MF, I + 1);
if (IFI.TopLevelModuleMap)
// FIXME: This unnecessarily re-reads the InputFileInfo.
- if (auto *FE = getInputFile(MF, I + 1).getFile())
+ if (auto FE = getInputFile(MF, I + 1).getFile())
Visitor(FE);
}
}
@@ -11661,12 +11633,13 @@ ASTReader::ASTReader(Preprocessor &PP, InMemoryModuleCache &ModuleCache,
ASTContext *Context,
const PCHContainerReader &PCHContainerRdr,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
- StringRef isysroot, bool DisableValidation,
+ StringRef isysroot,
+ DisableValidationForModuleKind DisableValidationKind,
bool AllowASTWithCompilerErrors,
bool AllowConfigurationMismatch, bool ValidateSystemInputs,
bool ValidateASTInputFilesContent, bool UseGlobalIndex,
std::unique_ptr<llvm::Timer> ReadTimer)
- : Listener(DisableValidation
+ : Listener(bool(DisableValidationKind & DisableValidationForModuleKind::PCH)
? cast<ASTReaderListener>(new SimpleASTReaderListener(PP))
: cast<ASTReaderListener>(new PCHValidator(PP, *this))),
SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
@@ -11674,7 +11647,7 @@ ASTReader::ASTReader(Preprocessor &PP, InMemoryModuleCache &ModuleCache,
ContextObj(Context), ModuleMgr(PP.getFileManager(), ModuleCache,
PCHContainerRdr, PP.getHeaderSearchInfo()),
DummyIdResolver(PP), ReadTimer(std::move(ReadTimer)), isysroot(isysroot),
- DisableValidation(DisableValidation),
+ DisableValidationKind(DisableValidationKind),
AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
AllowConfigurationMismatch(AllowConfigurationMismatch),
ValidateSystemInputs(ValidateSystemInputs),
@@ -11725,9 +11698,9 @@ class OMPClauseReader : public OMPClauseVisitor<OMPClauseReader> {
public:
OMPClauseReader(ASTRecordReader &Record)
: Record(Record), Context(Record.getContext()) {}
-
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *C);
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *C);
+#include "llvm/Frontend/OpenMP/OMP.inc"
OMPClause *readClause();
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
@@ -12531,10 +12504,10 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Record.readExpr();
+ Expr *AssociatedExprPr = Record.readExpr();
auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
+ Components.emplace_back(AssociatedExprPr, AssociatedDecl,
+ /*IsNonContiguous=*/false);
}
C->setComponents(Components, ListSizes);
}
@@ -12608,8 +12581,14 @@ void OMPClauseReader::VisitOMPDefaultmapClause(OMPDefaultmapClause *C) {
void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
C->setLParenLoc(Record.readSourceLocation());
+ for (unsigned I = 0; I < NumberOfOMPMotionModifiers; ++I) {
+ C->setMotionModifier(
+ I, static_cast<OpenMPMotionModifierKind>(Record.readInt()));
+ C->setMotionModifierLoc(I, Record.readSourceLocation());
+ }
C->setMapperQualifierLoc(Record.readNestedNameSpecifierLoc());
C->setMapperIdInfo(Record.readDeclarationNameInfo());
+ C->setColonLoc(Record.readSourceLocation());
auto NumVars = C->varlist_size();
auto UniqueDecls = C->getUniqueDeclarationsNum();
auto TotalLists = C->getTotalComponentListNum();
@@ -12648,18 +12627,24 @@ void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Record.readSubExpr();
+ Expr *AssociatedExprPr = Record.readSubExpr();
+ bool IsNonContiguous = Record.readBool();
auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
+ Components.emplace_back(AssociatedExprPr, AssociatedDecl, IsNonContiguous);
}
C->setComponents(Components, ListSizes);
}
void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
C->setLParenLoc(Record.readSourceLocation());
+ for (unsigned I = 0; I < NumberOfOMPMotionModifiers; ++I) {
+ C->setMotionModifier(
+ I, static_cast<OpenMPMotionModifierKind>(Record.readInt()));
+ C->setMotionModifierLoc(I, Record.readSourceLocation());
+ }
C->setMapperQualifierLoc(Record.readNestedNameSpecifierLoc());
C->setMapperIdInfo(Record.readDeclarationNameInfo());
+ C->setColonLoc(Record.readSourceLocation());
auto NumVars = C->varlist_size();
auto UniqueDecls = C->getUniqueDeclarationsNum();
auto TotalLists = C->getTotalComponentListNum();
@@ -12698,10 +12683,10 @@ void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Record.readSubExpr();
+ Expr *AssociatedExprPr = Record.readSubExpr();
+ bool IsNonContiguous = Record.readBool();
auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
+ Components.emplace_back(AssociatedExprPr, AssociatedDecl, IsNonContiguous);
}
C->setComponents(Components, ListSizes);
}
@@ -12748,10 +12733,10 @@ void OMPClauseReader::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedExprPr = Record.readSubExpr();
auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
+ Components.emplace_back(AssociatedExprPr, AssociatedDecl,
+ /*IsNonContiguous=*/false);
}
C->setComponents(Components, ListSizes);
}
@@ -12792,8 +12777,8 @@ void OMPClauseReader::VisitOMPUseDeviceAddrClause(OMPUseDeviceAddrClause *C) {
for (unsigned i = 0; i < TotalComponents; ++i) {
Expr *AssociatedExpr = Record.readSubExpr();
auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
+ Components.emplace_back(AssociatedExpr, AssociatedDecl,
+ /*IsNonContiguous=*/false);
}
C->setComponents(Components, ListSizes);
}
@@ -12835,8 +12820,8 @@ void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
for (unsigned i = 0; i < TotalComponents; ++i) {
Expr *AssociatedExpr = Record.readSubExpr();
auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
+ Components.emplace_back(AssociatedExpr, AssociatedDecl,
+ /*IsNonContiguous=*/false);
}
C->setComponents(Components, ListSizes);
}
@@ -12927,3 +12912,20 @@ OMPTraitInfo *ASTRecordReader::readOMPTraitInfo() {
}
return &TI;
}
+
+void ASTRecordReader::readOMPChildren(OMPChildren *Data) {
+ if (!Data)
+ return;
+ if (Reader->ReadingKind == ASTReader::Read_Stmt) {
+ // Skip NumClauses, NumChildren and HasAssociatedStmt fields.
+ skipInts(3);
+ }
+ SmallVector<OMPClause *, 4> Clauses(Data->getNumClauses());
+ for (unsigned I = 0, E = Data->getNumClauses(); I < E; ++I)
+ Clauses[I] = readOMPClause();
+ Data->setClauses(Clauses);
+ if (Data->hasAssociatedStmt())
+ Data->setAssociatedStmt(readStmt());
+ for (unsigned I = 0, E = Data->getNumChildren(); I < E; ++I)
+ Data->getChildren()[I] = readStmt();
+}
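
readOMPChildren consumes a [NumClauses, NumChildren, HasAssociatedStmt] prefix only on the statement path (the decl path reads those counts earlier, when allocating the node), then decodes clauses, the associated statement, and children in order. A sketch of that conditional-prefix decoding over a hypothetical record:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical cursor over a record, mirroring skipInts/readInt.
    struct Cursor {
      const std::vector<uint64_t> &R;
      unsigned Idx = 0;
      uint64_t read() { return R[Idx++]; }
      void skip(unsigned N) { Idx += N; }
    };

    int main() {
      // Layout: [NumClauses, NumChildren, HasAssociatedStmt, payload...].
      std::vector<uint64_t> Record = {2, 1, 1, /*payload*/ 10, 11, 12, 13};
      bool ReadingStmt = true;

      Cursor C{Record};
      if (ReadingStmt)
        C.skip(3); // counts were consumed earlier to size the node
      unsigned NumClauses = 2, NumChildren = 1; // known from allocation
      for (unsigned I = 0; I < NumClauses; ++I)
        (void)C.read(); // clause payload
      (void)C.read();   // associated statement
      for (unsigned I = 0; I < NumChildren; ++I)
        (void)C.read(); // child expressions
      assert(C.Idx == Record.size());
    }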
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index eef4ab16ec15..6bfb9bd783b5 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -281,6 +281,9 @@ namespace clang {
static Decl *getMostRecentDeclImpl(...);
static Decl *getMostRecentDecl(Decl *D);
+ static void mergeInheritableAttributes(ASTReader &Reader, Decl *D,
+ Decl *Previous);
+
template <typename DeclT>
static void attachPreviousDeclImpl(ASTReader &Reader,
Redeclarable<DeclT> *D, Decl *Previous,
@@ -366,6 +369,7 @@ namespace clang {
void VisitFieldDecl(FieldDecl *FD);
void VisitMSPropertyDecl(MSPropertyDecl *FD);
void VisitMSGuidDecl(MSGuidDecl *D);
+ void VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
RedeclarableResult VisitVarDeclImpl(VarDecl *D);
void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); }
@@ -504,10 +508,9 @@ uint64_t ASTDeclReader::GetCurrentCursorOffset() {
void ASTDeclReader::ReadFunctionDefinition(FunctionDecl *FD) {
if (Record.readInt()) {
- Reader.DefinitionSource[FD] = Loc.F->Kind == ModuleKind::MK_MainFile;
- if (Reader.getContext().getLangOpts().BuildingPCHWithObjectFile &&
- Reader.DeclIsFromPCHWithObjectFile(FD))
- Reader.DefinitionSource[FD] = true;
+ Reader.DefinitionSource[FD] =
+ Loc.F->Kind == ModuleKind::MK_MainFile ||
+ Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
}
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
CD->setNumCtorInitializers(Record.readInt());
@@ -583,7 +586,7 @@ void ASTDeclReader::VisitDecl(Decl *D) {
Reader.getContext());
}
D->setLocation(ThisDeclLoc);
- D->setInvalidDecl(Record.readInt());
+ D->InvalidDecl = Record.readInt();
if (Record.readInt()) { // hasAttrs
AttrVec Attrs;
Record.readAttributes(Attrs);
@@ -865,7 +868,10 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->setInlineSpecified(Record.readInt());
FD->setImplicitlyInline(Record.readInt());
FD->setVirtualAsWritten(Record.readInt());
- FD->setPure(Record.readInt());
+ // We defer calling `FunctionDecl::setPure()` here because, for methods of
+ // `ClassTemplateSpecializationDecl`s, we may not have connected up the
+ // definition (which is required for `setPure`).
+ const bool Pure = Record.readInt();
FD->setHasInheritedPrototype(Record.readInt());
FD->setHasWrittenPrototype(Record.readInt());
FD->setDeletedAsWritten(Record.readInt());
@@ -885,7 +891,6 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->ODRHash = Record.readInt();
FD->setHasODRHash(true);
- FD->setUsesFPIntrin(Record.readInt());
if (FD->isDefaulted()) {
if (unsigned NumLookups = Record.readInt()) {
@@ -1013,6 +1018,10 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
}
}
+ // Defer calling `setPure` until merging above has guaranteed we've set
+ // `DefinitionData` (as this will need to access it).
+ FD->setPure(Pure);
+
// Read in the parameters.
unsigned NumParams = Record.readInt();
SmallVector<ParmVarDecl *, 16> Params;
@@ -1375,6 +1384,17 @@ void ASTDeclReader::VisitMSGuidDecl(MSGuidDecl *D) {
Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
}
+void ASTDeclReader::VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D) {
+ VisitValueDecl(D);
+ D->Value = Record.readAPValue();
+
+ // Add this template parameter object to the AST context's lookup structure,
+ // and merge if needed.
+ if (TemplateParamObjectDecl *Existing =
+ Reader.getContext().TemplateParamObjectDecls.GetOrInsertNode(D))
+ Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
+}
+
void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
VisitValueDecl(FD);
@@ -1421,10 +1441,9 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
if (uint64_t Val = Record.readInt()) {
VD->setInit(Record.readExpr());
- if (Val > 1) {
+ if (Val != 1) {
EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
- Eval->CheckedICE = true;
- Eval->IsICE = (Val & 1) != 0;
+ Eval->HasConstantInitialization = (Val & 2) != 0;
Eval->HasConstantDestruction = (Val & 4) != 0;
}
}
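
The decoder above treats Val as a small bitfield: zero means no initializer, exactly 1 means an initializer with no evaluated facts, and otherwise bit 2 records constant initialization while bit 4 records constant destruction. A sketch of a matching (hypothetical) encoder:

    #include <cassert>
    #include <cstdint>

    static uint64_t encode(bool HasInit, bool ConstInit, bool ConstDtor) {
      if (!HasInit)
        return 0;
      uint64_t Val = 1; // "an initializer follows"
      if (ConstInit)
        Val |= 2;
      if (ConstDtor)
        Val |= 4;
      return Val;
    }

    int main() {
      uint64_t Val = encode(/*HasInit=*/true, /*ConstInit=*/true,
                            /*ConstDtor=*/false);
      assert(Val != 0);       // an initializer follows in the record
      assert(Val != 1);       // evaluated facts are present
      assert((Val & 2) != 0); // constant initialization
      assert((Val & 4) == 0); // no constant destruction
    }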
@@ -1436,10 +1455,9 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
}
if (VD->getStorageDuration() == SD_Static && Record.readInt()) {
- Reader.DefinitionSource[VD] = Loc.F->Kind == ModuleKind::MK_MainFile;
- if (Reader.getContext().getLangOpts().BuildingPCHWithObjectFile &&
- Reader.DeclIsFromPCHWithObjectFile(VD))
- Reader.DefinitionSource[VD] = true;
+ Reader.DefinitionSource[VD] =
+ Loc.F->Kind == ModuleKind::MK_MainFile ||
+ Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
}
enum VarKind {
@@ -1700,10 +1718,9 @@ void ASTDeclReader::ReadCXXDefinitionData(
Data.HasODRHash = true;
if (Record.readInt()) {
- Reader.DefinitionSource[D] = Loc.F->Kind == ModuleKind::MK_MainFile;
- if (Reader.getContext().getLangOpts().BuildingPCHWithObjectFile &&
- Reader.DeclIsFromPCHWithObjectFile(D))
- Reader.DefinitionSource[D] = true;
+ Reader.DefinitionSource[D] =
+ Loc.F->Kind == ModuleKind::MK_MainFile ||
+ Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
}
Data.NumBases = Record.readInt();
@@ -2410,8 +2427,10 @@ void ASTDeclReader::VisitLifetimeExtendedTemporaryDecl(
VisitDecl(D);
D->ExtendingDecl = readDeclAs<ValueDecl>();
D->ExprWithTemporary = Record.readStmt();
- if (Record.readInt())
+ if (Record.readInt()) {
D->Value = new (D->getASTContext()) APValue(Record.readAPValue());
+ D->getASTContext().addDestruction(D->Value);
+ }
D->ManglingNumber = Record.readInt();
mergeMergeable(D);
}
@@ -2655,41 +2674,18 @@ void ASTDeclReader::mergeMergeable(Mergeable<T> *D) {
}
void ASTDeclReader::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
+ Record.readOMPChildren(D->Data);
VisitDecl(D);
- unsigned NumVars = D->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i) {
- Vars.push_back(Record.readExpr());
- }
- D->setVars(Vars);
}
void ASTDeclReader::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
+ Record.readOMPChildren(D->Data);
VisitDecl(D);
- unsigned NumVars = D->varlist_size();
- unsigned NumClauses = D->clauselist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i) {
- Vars.push_back(Record.readExpr());
- }
- D->setVars(Vars);
- SmallVector<OMPClause *, 8> Clauses;
- Clauses.reserve(NumClauses);
- for (unsigned I = 0; I != NumClauses; ++I)
- Clauses.push_back(Record.readOMPClause());
- D->setClauses(Clauses);
}
void ASTDeclReader::VisitOMPRequiresDecl(OMPRequiresDecl * D) {
+ Record.readOMPChildren(D->Data);
VisitDecl(D);
- unsigned NumClauses = D->clauselist_size();
- SmallVector<OMPClause *, 8> Clauses;
- Clauses.reserve(NumClauses);
- for (unsigned I = 0; I != NumClauses; ++I)
- Clauses.push_back(Record.readOMPClause());
- D->setClauses(Clauses);
}
void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
@@ -2710,18 +2706,10 @@ void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
}
void ASTDeclReader::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
+ Record.readOMPChildren(D->Data);
VisitValueDecl(D);
- D->setLocation(readSourceLocation());
- Expr *MapperVarRefE = Record.readExpr();
- D->setMapperVarRef(MapperVarRefE);
D->VarName = Record.readDeclarationName();
D->PrevDeclInScope = readDeclID();
- unsigned NumClauses = D->clauselist_size();
- SmallVector<OMPClause *, 8> Clauses;
- Clauses.reserve(NumClauses);
- for (unsigned I = 0; I != NumClauses; ++I)
- Clauses.push_back(Record.readOMPClause());
- D->setClauses(Clauses);
}
void ASTDeclReader::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
@@ -2912,9 +2900,11 @@ static bool isSameTemplateParameter(const NamedDecl *X,
return false;
if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
return false;
- if (TX->hasTypeConstraint()) {
- const TypeConstraint *TXTC = TX->getTypeConstraint();
- const TypeConstraint *TYTC = TY->getTypeConstraint();
+ const TypeConstraint *TXTC = TX->getTypeConstraint();
+ const TypeConstraint *TYTC = TY->getTypeConstraint();
+ if (!TXTC != !TYTC)
+ return false;
+ if (TXTC && TYTC) {
if (TXTC->getNamedConcept() != TYTC->getNamedConcept())
return false;
if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
@@ -3543,6 +3533,19 @@ Decl *ASTReader::getMostRecentExistingDecl(Decl *D) {
return ASTDeclReader::getMostRecentDecl(D->getCanonicalDecl());
}
+void ASTDeclReader::mergeInheritableAttributes(ASTReader &Reader, Decl *D,
+ Decl *Previous) {
+ InheritableAttr *NewAttr = nullptr;
+ ASTContext &Context = Reader.getContext();
+ const auto *IA = Previous->getAttr<MSInheritanceAttr>();
+
+ if (IA && !D->hasAttr<MSInheritanceAttr>()) {
+ NewAttr = cast<InheritableAttr>(IA->clone(Context));
+ NewAttr->setInherited(true);
+ D->addAttr(NewAttr);
+ }
+}
+
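
mergeInheritableAttributes clones the attribute found on the previous declaration and marks the clone as inherited, so diagnostics can distinguish it from a written attribute. A reduced sketch of that clone-and-mark step with hypothetical attribute types (clang's InheritableAttr is arena-allocated in the ASTContext instead):

    #include <cassert>
    #include <memory>
    #include <vector>

    struct Attr {
      bool Inherited = false;
      virtual ~Attr() = default;
      virtual std::unique_ptr<Attr> clone() const = 0;
    };
    struct MSInheritance : Attr {
      std::unique_ptr<Attr> clone() const override {
        return std::make_unique<MSInheritance>(*this);
      }
    };

    struct Decl {
      std::vector<std::unique_ptr<Attr>> Attrs;
    };

    // Copy the attribute onto D only when D does not already carry one, and
    // flag the copy as inherited rather than written.
    static void mergeInheritable(Decl &D, const Decl &Previous) {
      if (!Previous.Attrs.empty() && D.Attrs.empty()) {
        std::unique_ptr<Attr> NewAttr = Previous.Attrs.front()->clone();
        NewAttr->Inherited = true;
        D.Attrs.push_back(std::move(NewAttr));
      }
    }

    int main() {
      Decl Prev, Cur;
      Prev.Attrs.push_back(std::make_unique<MSInheritance>());
      mergeInheritable(Cur, Prev);
      assert(Cur.Attrs.size() == 1 && Cur.Attrs.front()->Inherited);
    }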
template<typename DeclT>
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
Redeclarable<DeclT> *D,
@@ -3701,6 +3704,12 @@ void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D,
if (auto *TD = dyn_cast<TemplateDecl>(D))
inheritDefaultTemplateArguments(Reader.getContext(),
cast<TemplateDecl>(Previous), TD);
+
+ // If any declaration in the chain contains an inheritable attribute, it
+ // needs to be added to all the declarations in the redeclarable chain.
+ // FIXME: Only the logic for merging MSInheritanceAttr is present; it should
+ // be extended to all inheritable attributes.
+ mergeInheritableAttributes(Reader, D, Previous);
}
template<typename DeclT>
@@ -3994,6 +4003,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_MS_GUID:
D = MSGuidDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_TEMPLATE_PARAM_OBJECT:
+ D = TemplateParamObjectDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_CAPTURED:
D = CapturedDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
@@ -4008,24 +4020,35 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
// locations.
D = ImportDecl::CreateDeserialized(Context, ID, Record.back());
break;
- case DECL_OMP_THREADPRIVATE:
- D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID, Record.readInt());
+ case DECL_OMP_THREADPRIVATE: {
+ Record.skipInts(1);
+ unsigned NumChildren = Record.readInt();
+ Record.skipInts(1);
+ D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID, NumChildren);
break;
+ }
case DECL_OMP_ALLOCATE: {
- unsigned NumVars = Record.readInt();
unsigned NumClauses = Record.readInt();
+ unsigned NumVars = Record.readInt();
+ Record.skipInts(1);
D = OMPAllocateDecl::CreateDeserialized(Context, ID, NumVars, NumClauses);
break;
}
- case DECL_OMP_REQUIRES:
- D = OMPRequiresDecl::CreateDeserialized(Context, ID, Record.readInt());
+ case DECL_OMP_REQUIRES: {
+ unsigned NumClauses = Record.readInt();
+ Record.skipInts(2);
+ D = OMPRequiresDecl::CreateDeserialized(Context, ID, NumClauses);
break;
+ }
case DECL_OMP_DECLARE_REDUCTION:
D = OMPDeclareReductionDecl::CreateDeserialized(Context, ID);
break;
- case DECL_OMP_DECLARE_MAPPER:
- D = OMPDeclareMapperDecl::CreateDeserialized(Context, ID, Record.readInt());
+ case DECL_OMP_DECLARE_MAPPER: {
+ unsigned NumClauses = Record.readInt();
+ Record.skipInts(2);
+ D = OMPDeclareMapperDecl::CreateDeserialized(Context, ID, NumClauses);
break;
+ }
case DECL_OMP_CAPTUREDEXPR:
D = OMPCapturedExprDecl::CreateDeserialized(Context, ID);
break;
@@ -4437,10 +4460,10 @@ void ASTDeclReader::UpdateDecl(Decl *D,
uint64_t Val = Record.readInt();
if (Val && !VD->getInit()) {
VD->setInit(Record.readExpr());
- if (Val > 1) { // IsInitKnownICE = 1, IsInitNotICE = 2, IsInitICE = 3
+ if (Val != 1) {
EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
- Eval->CheckedICE = true;
- Eval->IsICE = Val == 3;
+ Eval->HasConstantInitialization = (Val & 2) != 0;
+ Eval->HasConstantDestruction = (Val & 4) != 0;
}
}
break;
@@ -4665,12 +4688,11 @@ void ASTDeclReader::UpdateDecl(Decl *D,
}
case UPD_DECL_MARKED_OPENMP_DECLARETARGET: {
- OMPDeclareTargetDeclAttr::MapTypeTy MapType =
- static_cast<OMPDeclareTargetDeclAttr::MapTypeTy>(Record.readInt());
- OMPDeclareTargetDeclAttr::DevTypeTy DevType =
- static_cast<OMPDeclareTargetDeclAttr::DevTypeTy>(Record.readInt());
+ auto MapType = Record.readEnum<OMPDeclareTargetDeclAttr::MapTypeTy>();
+ auto DevType = Record.readEnum<OMPDeclareTargetDeclAttr::DevTypeTy>();
+ unsigned Level = Record.readInt();
D->addAttr(OMPDeclareTargetDeclAttr::CreateImplicit(
- Reader.getContext(), MapType, DevType, readSourceRange(),
+ Reader.getContext(), MapType, DevType, Level, readSourceRange(),
AttributeCommonInfo::AS_Pragma));
break;
}
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index a40c5499a6d7..0e1af53303b4 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -226,6 +226,8 @@ void ASTStmtReader::VisitIfStmt(IfStmt *S) {
S->setInit(Record.readSubStmt());
S->setIfLoc(readSourceLocation());
+ S->setLParenLoc(readSourceLocation());
+ S->setRParenLoc(readSourceLocation());
if (HasElse)
S->setElseLoc(readSourceLocation());
}
@@ -247,6 +249,8 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
S->setSwitchLoc(readSourceLocation());
+ S->setLParenLoc(readSourceLocation());
+ S->setRParenLoc(readSourceLocation());
SwitchCase *PrevSC = nullptr;
for (auto E = Record.size(); Record.getIdx() != E; ) {
@@ -610,7 +614,7 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
E->getTrailingObjects<TemplateArgumentLoc>(), NumTemplateArgs);
- E->setDecl(readDeclAs<ValueDecl>());
+ E->D = readDeclAs<ValueDecl>();
E->setLocation(readSourceLocation());
E->DNLoc = Record.readDeclarationNameLoc(E->getDecl()->getDeclName());
}
@@ -708,7 +712,8 @@ void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
E->setOperatorLoc(readSourceLocation());
E->setCanOverflow(Record.readInt());
if (hasFP_Features)
- E->setStoredFPFeatures(FPOptionsOverride(Record.readInt()));
+ E->setStoredFPFeatures(
+ FPOptionsOverride::getFromOpaqueInt(Record.readInt()));
}
void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
@@ -991,12 +996,16 @@ void ASTStmtReader::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
unsigned NumArgs = Record.readInt();
+ bool HasFPFeatures = Record.readInt();
assert((NumArgs == E->getNumArgs()) && "Wrong NumArgs!");
E->setRParenLoc(readSourceLocation());
E->setCallee(Record.readSubExpr());
for (unsigned I = 0; I != NumArgs; ++I)
E->setArg(I, Record.readSubExpr());
E->setADLCallKind(static_cast<CallExpr::ADLCallKind>(Record.readInt()));
+ if (HasFPFeatures)
+ E->setStoredFPFeatures(
+ FPOptionsOverride::getFromOpaqueInt(Record.readInt()));
}
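
CallExpr now stores FP features as optional trailing storage, which is why the record places HasFPFeatures right after NumArgs and why CreateEmpty (further down) needs it before allocation. A sketch of conditional trailing storage via over-allocation, with a hypothetical node type:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <new>

    struct Node {
      uint32_t HasFP; // stored as an int so the trailing slot stays aligned
      uint32_t *fpSlot() { return reinterpret_cast<uint32_t *>(this + 1); }

      static Node *create(bool HasFP) {
        // Over-allocate: the slot exists only when requested, so a reader
        // must know HasFP before constructing the empty node.
        size_t Size = sizeof(Node) + (HasFP ? sizeof(uint32_t) : 0);
        Node *N = static_cast<Node *>(::operator new(Size));
        N->HasFP = HasFP;
        return N;
      }
    };

    int main() {
      Node *N = Node::create(/*HasFP=*/true);
      *N->fpSlot() = 0x7; // deserialize the opaque FP-override bits
      assert(N->HasFP && *N->fpSlot() == 0x7);
      ::operator delete(N);
    }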
void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
@@ -1073,6 +1082,8 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
unsigned NumBaseSpecs = Record.readInt();
assert(NumBaseSpecs == E->path_size());
+ unsigned HasFPFeatures = Record.readInt();
+ assert(E->hasStoredFPFeatures() == HasFPFeatures);
E->setSubExpr(Record.readSubExpr());
E->setCastKind((CastKind)Record.readInt());
CastExpr::path_iterator BaseI = E->path_begin();
@@ -1081,6 +1092,9 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
*BaseSpec = Record.readCXXBaseSpecifier();
*BaseI++ = BaseSpec;
}
+ if (HasFPFeatures)
+ *E->getTrailingFPFeatures() =
+ FPOptionsOverride::getFromOpaqueInt(Record.readInt());
}
void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
@@ -1093,7 +1107,8 @@ void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
E->setRHS(Record.readSubExpr());
E->setOperatorLoc(readSourceLocation());
if (hasFP_Features)
- E->setStoredFPFeatures(FPOptionsOverride(Record.readInt()));
+ E->setStoredFPFeatures(
+ FPOptionsOverride::getFromOpaqueInt(Record.readInt()));
}
void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
@@ -1662,7 +1677,6 @@ void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
E->CXXOperatorCallExprBits.OperatorKind = Record.readInt();
E->Range = Record.readSourceRange();
- E->setFPFeatures(FPOptionsOverride(Record.readInt()));
}
void ASTStmtReader::VisitCXXRewrittenBinaryOperator(
@@ -1976,10 +1990,10 @@ ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
void
ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
VisitExpr(E);
- assert(Record.peekInt() == E->arg_size() &&
+ assert(Record.peekInt() == E->getNumArgs() &&
"Read wrong record during creation ?");
Record.skipInts(1);
- for (unsigned I = 0, N = E->arg_size(); I != N; ++I)
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
E->setArg(I, Record.readSubExpr());
E->TSI = readTypeSourceInfo();
E->setLParenLoc(readSourceLocation());
@@ -2107,7 +2121,8 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
- E->Param = readDeclAs<NonTypeTemplateParmDecl>();
+ E->ParamAndRef.setPointer(readDeclAs<NonTypeTemplateParmDecl>());
+ E->ParamAndRef.setInt(Record.readInt());
E->SubstNonTypeTemplateParmExprBits.NameLoc = readSourceLocation();
E->Replacement = Record.readSubExpr();
}
@@ -2152,6 +2167,7 @@ void ASTStmtReader::VisitCXXFoldExpr(CXXFoldExpr *E) {
E->NumExpansions = Record.readInt();
E->SubExprs[0] = Record.readSubExpr();
E->SubExprs[1] = Record.readSubExpr();
+ E->SubExprs[2] = Record.readSubExpr();
E->Opcode = (BinaryOperatorKind)Record.readInt();
}
@@ -2171,9 +2187,9 @@ void ASTStmtReader::VisitRecoveryExpr(RecoveryExpr *E) {
unsigned NumArgs = Record.readInt();
E->BeginLoc = readSourceLocation();
E->EndLoc = readSourceLocation();
- assert(
- (NumArgs == std::distance(E->children().begin(), E->children().end())) &&
- "Wrong NumArgs!");
+ assert((NumArgs + 0LL ==
+ std::distance(E->children().begin(), E->children().end())) &&
+ "Wrong NumArgs!");
(void)NumArgs;
for (Stmt *&Child : E->children())
Child = Record.readSubStmt();
@@ -2258,99 +2274,22 @@ void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
//===----------------------------------------------------------------------===//
void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
+ Record.readOMPChildren(E->Data);
E->setLocStart(readSourceLocation());
E->setLocEnd(readSourceLocation());
- SmallVector<OMPClause *, 5> Clauses;
- for (unsigned i = 0; i < E->getNumClauses(); ++i)
- Clauses.push_back(Record.readOMPClause());
- E->setClauses(Clauses);
- if (E->hasAssociatedStmt())
- E->setAssociatedStmt(Record.readSubStmt());
}
void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
VisitStmt(D);
- // Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
- Record.skipInts(2);
+ // The CollapsedNum field was read in ReadStmtFromStream.
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setIterationVariable(Record.readSubExpr());
- D->setLastIteration(Record.readSubExpr());
- D->setCalcLastIteration(Record.readSubExpr());
- D->setPreCond(Record.readSubExpr());
- D->setCond(Record.readSubExpr());
- D->setInit(Record.readSubExpr());
- D->setInc(Record.readSubExpr());
- D->setPreInits(Record.readSubStmt());
- if (isOpenMPWorksharingDirective(D->getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(D->getDirectiveKind()) ||
- isOpenMPDistributeDirective(D->getDirectiveKind())) {
- D->setIsLastIterVariable(Record.readSubExpr());
- D->setLowerBoundVariable(Record.readSubExpr());
- D->setUpperBoundVariable(Record.readSubExpr());
- D->setStrideVariable(Record.readSubExpr());
- D->setEnsureUpperBound(Record.readSubExpr());
- D->setNextLowerBound(Record.readSubExpr());
- D->setNextUpperBound(Record.readSubExpr());
- D->setNumIterations(Record.readSubExpr());
- }
- if (isOpenMPLoopBoundSharingDirective(D->getDirectiveKind())) {
- D->setPrevLowerBoundVariable(Record.readSubExpr());
- D->setPrevUpperBoundVariable(Record.readSubExpr());
- D->setDistInc(Record.readSubExpr());
- D->setPrevEnsureUpperBound(Record.readSubExpr());
- D->setCombinedLowerBoundVariable(Record.readSubExpr());
- D->setCombinedUpperBoundVariable(Record.readSubExpr());
- D->setCombinedEnsureUpperBound(Record.readSubExpr());
- D->setCombinedInit(Record.readSubExpr());
- D->setCombinedCond(Record.readSubExpr());
- D->setCombinedNextLowerBound(Record.readSubExpr());
- D->setCombinedNextUpperBound(Record.readSubExpr());
- D->setCombinedDistCond(Record.readSubExpr());
- D->setCombinedParForInDistCond(Record.readSubExpr());
- }
- SmallVector<Expr *, 4> Sub;
- unsigned CollapsedNum = D->getCollapsedNumber();
- Sub.reserve(CollapsedNum);
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setCounters(Sub);
- Sub.clear();
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setPrivateCounters(Sub);
- Sub.clear();
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setInits(Sub);
- Sub.clear();
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setUpdates(Sub);
- Sub.clear();
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setFinals(Sub);
- Sub.clear();
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setDependentCounters(Sub);
- Sub.clear();
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setDependentInits(Sub);
- Sub.clear();
- for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Record.readSubExpr());
- D->setFinalsConditions(Sub);
}
void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
@@ -2359,8 +2298,7 @@ void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
@@ -2369,23 +2307,18 @@ void ASTStmtReader::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
void ASTStmtReader::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPSectionDirective(OMPSectionDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPSingleDirective(OMPSingleDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
@@ -2396,16 +2329,13 @@ void ASTStmtReader::VisitOMPMasterDirective(OMPMasterDirective *D) {
void ASTStmtReader::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
D->DirName = Record.readDeclarationNameInfo();
}
void ASTStmtReader::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPParallelForSimdDirective(
@@ -2416,28 +2346,20 @@ void ASTStmtReader::VisitOMPParallelForSimdDirective(
void ASTStmtReader::VisitOMPParallelMasterDirective(
OMPParallelMasterDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
}
void ASTStmtReader::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTaskDirective(OMPTaskDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *D) {
@@ -2457,100 +2379,73 @@ void ASTStmtReader::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
void ASTStmtReader::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setReductionRef(Record.readSubExpr());
}
void ASTStmtReader::VisitOMPFlushDirective(OMPFlushDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPDepobjDirective(OMPDepobjDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPScanDirective(OMPScanDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setX(Record.readSubExpr());
- D->setV(Record.readSubExpr());
- D->setExpr(Record.readSubExpr());
- D->setUpdateExpr(Record.readSubExpr());
- D->IsXLHSInRHSPart = Record.readInt() != 0;
- D->IsPostfixUpdate = Record.readInt() != 0;
+ D->IsXLHSInRHSPart = Record.readBool();
+ D->IsPostfixUpdate = Record.readBool();
}
void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
VisitStmt(D);
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetEnterDataDirective(
OMPTargetEnterDataDirective *D) {
VisitStmt(D);
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetExitDataDirective(
OMPTargetExitDataDirective *D) {
VisitStmt(D);
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetParallelDirective(
OMPTargetParallelDirective *D) {
VisitStmt(D);
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
VisitOMPLoopDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
@@ -2558,20 +2453,18 @@ void ASTStmtReader::VisitOMPCancellationPointDirective(
OMPCancellationPointDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record.readInt()));
+ D->setCancelRegion(Record.readEnum<OpenMPDirectiveKind>());
}
void ASTStmtReader::VisitOMPCancelDirective(OMPCancelDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record.readInt()));
+ D->setCancelRegion(Record.readEnum<OpenMPDirectiveKind>());
}
void ASTStmtReader::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
@@ -2581,7 +2474,7 @@ void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
void ASTStmtReader::VisitOMPMasterTaskLoopDirective(
OMPMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
@@ -2592,7 +2485,7 @@ void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
void ASTStmtReader::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPParallelMasterTaskLoopSimdDirective(
@@ -2606,15 +2499,13 @@ void ASTStmtReader::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
void ASTStmtReader::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
VisitStmt(D);
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPDistributeParallelForSimdDirective(
@@ -2654,14 +2545,11 @@ void ASTStmtReader::VisitOMPTeamsDistributeParallelForSimdDirective(
void ASTStmtReader::VisitOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTargetTeamsDirective(OMPTargetTeamsDirective *D) {
VisitStmt(D);
- // The NumClauses field was read in ReadStmtFromStream.
- Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
@@ -2673,8 +2561,7 @@ void ASTStmtReader::VisitOMPTargetTeamsDistributeDirective(
void ASTStmtReader::VisitOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
- D->setTaskReductionRefExpr(Record.readSubExpr());
- D->setHasCancel(Record.readInt());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTargetTeamsDistributeParallelForSimdDirective(
@@ -2977,7 +2864,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CALL:
S = CallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
break;
case EXPR_RECOVERY:
@@ -3011,13 +2899,17 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_IMPLICIT_CAST:
- S = ImplicitCastExpr::CreateEmpty(Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ S = ImplicitCastExpr::CreateEmpty(
+ Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
break;
case EXPR_CSTYLE_CAST:
- S = CStyleCastExpr::CreateEmpty(Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ S = CStyleCastExpr::CreateEmpty(
+ Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
break;
case EXPR_COMPOUND_LITERAL:
@@ -3246,24 +3138,24 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
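
The remaining STMT_OMP_* hunks make one mechanical change: the record now emits CollapsedNum before NumClauses, so the reader's fixed offsets swap to match. A tiny sketch of fixed-offset header reads over a hypothetical record:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static const unsigned NumStmtFields = 1; // common stmt fields come first

    int main() {
      std::vector<uint64_t> Record = {/*stmt field*/ 0,
                                      /*CollapsedNum*/ 2,
                                      /*NumClauses*/ 5};
      // Writer and reader must agree on this order; the diff swaps the
      // reader to match a writer that now emits CollapsedNum first.
      unsigned CollapsedNum = Record[NumStmtFields];
      unsigned NumClauses = Record[NumStmtFields + 1];
      assert(CollapsedNum == 2 && NumClauses == 5);
    }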
case STMT_OMP_FOR_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPForDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
Empty);
break;
}
case STMT_OMP_FOR_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPForSimdDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
Empty);
break;
@@ -3293,16 +3185,16 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_PARALLEL_FOR_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPParallelForDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPParallelForSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
@@ -3355,10 +3247,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
- case STMT_OMP_ORDERED_DIRECTIVE:
- S = OMPOrderedDirective::CreateEmpty(
- Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ case STMT_OMP_ORDERED_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ bool HasAssociatedStmt = Record[ASTStmtReader::NumStmtFields + 2];
+ S = OMPOrderedDirective::CreateEmpty(Context, NumClauses,
+ !HasAssociatedStmt, Empty);
break;
+ }
case STMT_OMP_ATOMIC_DIRECTIVE:
S = OMPAtomicDirective::CreateEmpty(
@@ -3391,8 +3286,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_TARGET_PARALLEL_FOR_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTargetParallelForDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
@@ -3418,72 +3313,72 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_TASKLOOP_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTaskLoopDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
Empty);
break;
}
case STMT_OMP_TASKLOOP_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTaskLoopSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_MASTER_TASKLOOP_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPMasterTaskLoopDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPMasterTaskLoopSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPParallelMasterTaskLoopDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPParallelMasterTaskLoopSimdDirective::CreateEmpty(
Context, NumClauses, CollapsedNum, Empty);
break;
}
case STMT_OMP_DISTRIBUTE_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPDistributeDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
Empty);
break;
}
case STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPDistributeParallelForDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPDistributeParallelForSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum,
Empty);
@@ -3491,56 +3386,56 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
}
case STMT_OMP_DISTRIBUTE_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPDistributeSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_TARGET_PARALLEL_FOR_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTargetParallelForSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_TARGET_SIMD_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTargetSimdDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
Empty);
break;
}
case STMT_OMP_TEAMS_DISTRIBUTE_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
- S = OMPTeamsDistributeDirective::CreateEmpty(Context, NumClauses,
- CollapsedNum, Empty);
- break;
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTeamsDistributeDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
}
case STMT_OMP_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE: {
- unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
- unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTeamsDistributeSimdDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTeamsDistributeParallelForSimdDirective::CreateEmpty(
Context, NumClauses, CollapsedNum, Empty);
break;
}
case STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTeamsDistributeParallelForDirective::CreateEmpty(
Context, NumClauses, CollapsedNum, Empty);
break;
@@ -3552,32 +3447,32 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTargetTeamsDistributeDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTargetTeamsDistributeParallelForDirective::CreateEmpty(
Context, NumClauses, CollapsedNum, Empty);
break;
}
case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTargetTeamsDistributeParallelForSimdDirective::CreateEmpty(
Context, NumClauses, CollapsedNum, Empty);
break;
}
case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE: {
- auto NumClauses = Record[ASTStmtReader::NumStmtFields];
- auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPTargetTeamsDistributeSimdDirective::CreateEmpty(
Context, NumClauses, CollapsedNum, Empty);
break;
@@ -3585,12 +3480,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CXX_OPERATOR_CALL:
S = CXXOperatorCallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
break;
case EXPR_CXX_MEMBER_CALL:
S = CXXMemberCallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
break;
case EXPR_CXX_REWRITTEN_BINARY_OPERATOR:
@@ -3614,8 +3511,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_STATIC_CAST:
- S = CXXStaticCastExpr::CreateEmpty(Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ S = CXXStaticCastExpr::CreateEmpty(
+ Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
break;
case EXPR_CXX_DYNAMIC_CAST:
@@ -3637,8 +3536,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_FUNCTIONAL_CAST:
- S = CXXFunctionalCastExpr::CreateEmpty(Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ S = CXXFunctionalCastExpr::CreateEmpty(
+ Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
break;
case EXPR_BUILTIN_BIT_CAST:
@@ -3648,7 +3549,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_USER_DEFINED_LITERAL:
S = UserDefinedLiteral::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
break;
case EXPR_CXX_STD_INITIALIZER_LIST:
@@ -3831,7 +3733,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CUDA_KERNEL_CALL:
S = CUDAKernelCallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
break;
case EXPR_ASTYPE:
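
A note on the reader hunks above: the record layout for OpenMP loop directives now stores CollapsedNum first and NumClauses second, read from fixed offsets past the common statement fields. The following standalone sketch mirrors that trailing-field convention; the prefix width of 2 is an assumed placeholder, not Clang's actual ASTStmtReader::NumStmtFields value.

#include <cassert>
#include <cstdint>
#include <vector>

// Assumed stand-in for the common statement-field prefix width.
constexpr unsigned NumStmtFields = 2;

struct LoopDirectiveHeader {
  unsigned CollapsedNum;
  unsigned NumClauses;
};

// Mirrors: CollapsedNum = Record[NumStmtFields];
//          NumClauses   = Record[NumStmtFields + 1];
LoopDirectiveHeader readLoopHeader(const std::vector<uint64_t> &Record) {
  return {static_cast<unsigned>(Record[NumStmtFields]),
          static_cast<unsigned>(Record[NumStmtFields + 1])};
}

int main() {
  std::vector<uint64_t> Record = {0, 0, /*CollapsedNum=*/2, /*NumClauses=*/3};
  LoopDirectiveHeader H = readLoopHeader(Record);
  assert(H.CollapsedNum == 2 && H.NumClauses == 3);
}

The call-expression cases follow the analogous convention: a HasFPFeatures flag now sits at NumExprFields + 1, immediately after the argument count.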
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 2345a12caeb2..6bfa7b0e7d6d 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -81,7 +81,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -198,8 +198,8 @@ void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
Record.AddSourceLocation(TL.getBuiltinLoc());
if (TL.needsExtraLocalData()) {
Record.push_back(TL.getWrittenTypeSpec());
- Record.push_back(TL.getWrittenSignSpec());
- Record.push_back(TL.getWrittenWidthSpec());
+ Record.push_back(static_cast<uint64_t>(TL.getWrittenSignSpec()));
+ Record.push_back(static_cast<uint64_t>(TL.getWrittenWidthSpec()));
Record.push_back(TL.hasModeAttr());
}
}
@@ -1119,7 +1119,6 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Timestamps
- MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // PCHHasObjectFile
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
unsigned MetadataAbbrevCode = Stream.EmitAbbrev(std::move(MetadataAbbrev));
@@ -1134,7 +1133,6 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
CLANG_VERSION_MINOR,
!isysroot.empty(),
IncludeTimestamps,
- Context.getLangOpts().BuildingPCHWithObjectFile,
ASTHasCompilerErrors};
Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
getClangFullRepositoryVersion());
@@ -1274,6 +1272,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
const TargetOptions &TargetOpts = Target.getTargetOpts();
AddString(TargetOpts.Triple, Record);
AddString(TargetOpts.CPU, Record);
+ AddString(TargetOpts.TuneCPU, Record);
AddString(TargetOpts.ABI, Record);
Record.push_back(TargetOpts.FeaturesAsWritten.size());
for (unsigned I = 0, N = TargetOpts.FeaturesAsWritten.size(); I != N; ++I) {
@@ -1321,6 +1320,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.push_back(HSOpts.DisableModuleHash);
Record.push_back(HSOpts.ImplicitModuleMaps);
Record.push_back(HSOpts.ModuleMapFileHomeIsCwd);
+ Record.push_back(HSOpts.EnablePrebuiltImplicitModules);
Record.push_back(HSOpts.UseBuiltinIncludes);
Record.push_back(HSOpts.UseStandardSystemIncludes);
Record.push_back(HSOpts.UseStandardCXXIncludes);
@@ -1453,7 +1453,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
if (!SLoc->isFile())
continue;
const SrcMgr::FileInfo &File = SLoc->getFile();
- const SrcMgr::ContentCache *Cache = File.getContentCache();
+ const SrcMgr::ContentCache *Cache = &File.getContentCache();
if (!Cache->OrigEntry)
continue;
@@ -1469,7 +1469,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
if (PP->getHeaderSearchInfo()
.getHeaderSearchOpts()
.ValidateASTInputFilesContent) {
- auto *MemBuff = Cache->getRawBuffer();
+ auto MemBuff = Cache->getBufferIfLoaded();
if (MemBuff)
ContentHash = hash_value(MemBuff->getBuffer());
else
@@ -1622,7 +1622,7 @@ namespace {
ASTWriter &Writer;
// Keep track of the framework names we've used during serialization.
- SmallVector<char, 128> FrameworkStringData;
+ SmallString<128> FrameworkStringData;
llvm::StringMap<unsigned> FrameworkNameOffset;
public:
@@ -1709,8 +1709,7 @@ namespace {
= FrameworkNameOffset.find(Data.HFI.Framework);
if (Pos == FrameworkNameOffset.end()) {
Offset = FrameworkStringData.size() + 1;
- FrameworkStringData.append(Data.HFI.Framework.begin(),
- Data.HFI.Framework.end());
+ FrameworkStringData.append(Data.HFI.Framework);
FrameworkStringData.push_back(0);
FrameworkNameOffset[Data.HFI.Framework] = Offset;
@@ -1953,7 +1952,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Figure out which record code to use.
unsigned Code;
if (SLoc->isFile()) {
- const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache();
+ const SrcMgr::ContentCache *Cache = &SLoc->getFile().getContentCache();
if (Cache->OrigEntry) {
Code = SM_SLOC_FILE_ENTRY;
} else
@@ -1971,7 +1970,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.push_back(File.getFileCharacteristic()); // FIXME: stable encoding
Record.push_back(File.hasLineDirectives());
- const SrcMgr::ContentCache *Content = File.getContentCache();
+ const SrcMgr::ContentCache *Content = &File.getContentCache();
bool EmitBlob = false;
if (Content->OrigEntry) {
assert(Content->OrigEntry == Content->ContentsEntry &&
@@ -2003,9 +2002,9 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// We add one to the size so that we capture the trailing NULL
// that is required by llvm::MemoryBuffer::getMemBuffer (on
// the reader side).
- const llvm::MemoryBuffer *Buffer =
- Content->getBuffer(PP.getDiagnostics(), PP.getFileManager());
- StringRef Name = Buffer->getBufferIdentifier();
+ llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ Content->getBufferOrNone(PP.getDiagnostics(), PP.getFileManager());
+ StringRef Name = Buffer ? Buffer->getBufferIdentifier() : "";
Stream.EmitRecordWithBlob(SLocBufferAbbrv, Record,
StringRef(Name.data(), Name.size() + 1));
EmitBlob = true;
@@ -2017,8 +2016,10 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
if (EmitBlob) {
// Include the implicit terminating null character in the on-disk buffer
// if we're writing it uncompressed.
- const llvm::MemoryBuffer *Buffer =
- Content->getBuffer(PP.getDiagnostics(), PP.getFileManager());
+ llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ Content->getBufferOrNone(PP.getDiagnostics(), PP.getFileManager());
+ if (!Buffer)
+ Buffer = llvm::MemoryBufferRef("<<<INVALID BUFFER>>>", "");
StringRef Blob(Buffer->getBufferStart(), Buffer->getBufferSize() + 1);
emitBlob(Stream, Blob, SLocBufferBlobCompressedAbbrv,
SLocBufferBlobAbbrv);
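
The blob-emission hunk above has two invariants worth calling out: the emitted blob always includes one byte past the buffer end, so the reader sees the terminating NUL that llvm::MemoryBuffer::getMemBuffer requires, and a named placeholder is substituted when the content cache cannot produce a buffer. A minimal sketch of both, using std::string in place of MemoryBufferRef:

#include <cassert>
#include <string>

// If the buffer is unavailable, fall back to a placeholder rather than
// dereferencing a missing buffer; always capture size() + 1 bytes so the
// trailing '\0' travels with the blob.
std::string makeBlob(const std::string *MaybeBuffer) {
  static const std::string Invalid = "<<<INVALID BUFFER>>>";
  const std::string &Buf = MaybeBuffer ? *MaybeBuffer : Invalid;
  // c_str() guarantees a trailing NUL, which size() + 1 picks up.
  return std::string(Buf.c_str(), Buf.size() + 1);
}

int main() {
  std::string Src = "int x;";
  assert(makeBlob(&Src).back() == '\0');
  assert(makeBlob(nullptr).find("INVALID") != std::string::npos);
}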
@@ -3276,9 +3277,8 @@ class ASTIdentifierTableTrait {
/// doesn't check whether the name has macros defined; use PublicMacroIterator
/// to check that.
bool isInterestingIdentifier(const IdentifierInfo *II, uint64_t MacroOffset) {
- if (MacroOffset ||
- II->isPoisoned() ||
- (IsModule ? II->hasRevertedBuiltin() : II->getObjCOrBuiltinID()) ||
+ if (MacroOffset || II->isPoisoned() ||
+ (!IsModule && II->getObjCOrBuiltinID()) ||
II->hasRevertedTokenIDToIdentifier() ||
(NeedDecls && II->getFETokenInfo()))
return true;
@@ -3385,7 +3385,6 @@ public:
Bits = (Bits << 1) | unsigned(HadMacroDefinition);
Bits = (Bits << 1) | unsigned(II->isExtensionToken());
Bits = (Bits << 1) | unsigned(II->isPoisoned());
- Bits = (Bits << 1) | unsigned(II->hasRevertedBuiltin());
Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier());
Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword());
LE.write<uint16_t>(Bits);
@@ -3690,7 +3689,7 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
// We also build up small sets of the constructor and conversion function
// names which are visible.
- llvm::SmallSet<DeclarationName, 8> ConstructorNameSet, ConversionNameSet;
+ llvm::SmallPtrSet<DeclarationName, 8> ConstructorNameSet, ConversionNameSet;
for (auto &Lookup : *DC->buildLookup()) {
auto &Name = Lookup.first;
@@ -3979,6 +3978,7 @@ void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
Record.push_back(V.Enabled ? 1 : 0);
Record.push_back(V.Avail);
Record.push_back(V.Core);
+ Record.push_back(V.Opt);
}
Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
}
@@ -4152,24 +4152,24 @@ void ASTWriter::WriteMSPointersToMembersPragmaOptions(Sema &SemaRef) {
Stream.EmitRecord(POINTERS_TO_MEMBERS_PRAGMA_OPTIONS, Record);
}
-/// Write the state of 'pragma pack' at the end of the module.
+/// Write the state of 'pragma align/pack' at the end of the module.
void ASTWriter::WritePackPragmaOptions(Sema &SemaRef) {
- // Don't serialize pragma pack state for modules, since it should only take
- // effect on a per-submodule basis.
+ // Don't serialize pragma align/pack state for modules, since it should only
+ // take effect on a per-submodule basis.
if (WritingModule)
return;
RecordData Record;
- Record.push_back(SemaRef.PackStack.CurrentValue);
- AddSourceLocation(SemaRef.PackStack.CurrentPragmaLocation, Record);
- Record.push_back(SemaRef.PackStack.Stack.size());
- for (const auto &StackEntry : SemaRef.PackStack.Stack) {
- Record.push_back(StackEntry.Value);
+ AddAlignPackInfo(SemaRef.AlignPackStack.CurrentValue, Record);
+ AddSourceLocation(SemaRef.AlignPackStack.CurrentPragmaLocation, Record);
+ Record.push_back(SemaRef.AlignPackStack.Stack.size());
+ for (const auto &StackEntry : SemaRef.AlignPackStack.Stack) {
+ AddAlignPackInfo(StackEntry.Value, Record);
AddSourceLocation(StackEntry.PragmaLocation, Record);
AddSourceLocation(StackEntry.PragmaPushLocation, Record);
AddString(StackEntry.StackSlotLabel, Record);
}
- Stream.EmitRecord(PACK_PRAGMA_OPTIONS, Record);
+ Stream.EmitRecord(ALIGN_PACK_PRAGMA_OPTIONS, Record);
}
/// Write the state of 'pragma float_control' at the end of the module.
@@ -4180,11 +4180,11 @@ void ASTWriter::WriteFloatControlPragmaOptions(Sema &SemaRef) {
return;
RecordData Record;
- Record.push_back(SemaRef.FpPragmaStack.CurrentValue);
+ Record.push_back(SemaRef.FpPragmaStack.CurrentValue.getAsOpaqueInt());
AddSourceLocation(SemaRef.FpPragmaStack.CurrentPragmaLocation, Record);
Record.push_back(SemaRef.FpPragmaStack.Stack.size());
for (const auto &StackEntry : SemaRef.FpPragmaStack.Stack) {
- Record.push_back(StackEntry.Value);
+ Record.push_back(StackEntry.Value.getAsOpaqueInt());
AddSourceLocation(StackEntry.PragmaLocation, Record);
AddSourceLocation(StackEntry.PragmaPushLocation, Record);
AddString(StackEntry.StackSlotLabel, Record);
@@ -4981,13 +4981,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
const VarDecl *VD = cast<VarDecl>(D);
Record.push_back(VD->isInline());
Record.push_back(VD->isInlineSpecified());
- if (VD->getInit()) {
- Record.push_back(!VD->isInitKnownICE() ? 1
- : (VD->isInitICE() ? 3 : 2));
- Record.AddStmt(const_cast<Expr*>(VD->getInit()));
- } else {
- Record.push_back(0);
- }
+ Record.AddVarDeclInit(VD);
break;
}
@@ -5114,6 +5108,12 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
}
}
+void ASTWriter::AddAlignPackInfo(const Sema::AlignPackInfo &Info,
+ RecordDataImpl &Record) {
+ uint32_t Raw = Sema::AlignPackInfo::getRawEncoding(Info);
+ Record.push_back(Raw);
+}
+
void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
uint32_t Raw = Loc.getRawEncoding();
Record.push_back((Raw << 1) | (Raw >> 31));
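
AddSourceLocation above stores (Raw << 1) | (Raw >> 31), a rotate-left by one bit. Presumably this keeps locations whose high bit is set from always costing the maximum width under the bitstream's variable-width integer encoding; the corresponding reader would rotate right. A self-checking round-trip sketch:

#include <cassert>
#include <cstdint>

// Rotate-left-by-one encode, rotate-right-by-one decode.
uint32_t encodeLoc(uint32_t Raw) { return (Raw << 1) | (Raw >> 31); }
uint32_t decodeLoc(uint32_t Enc) { return (Enc >> 1) | (Enc << 31); }

int main() {
  for (uint32_t Raw : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xDEADBEEFu})
    assert(decodeLoc(encodeLoc(Raw)) == Raw);
}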
@@ -5128,61 +5128,6 @@ void ASTRecordWriter::AddAPFloat(const llvm::APFloat &Value) {
AddAPInt(Value.bitcastToAPInt());
}
-static void WriteFixedPointSemantics(ASTRecordWriter &Record,
- FixedPointSemantics FPSema) {
- Record.push_back(FPSema.getWidth());
- Record.push_back(FPSema.getScale());
- Record.push_back(FPSema.isSigned() | FPSema.isSaturated() << 1 |
- FPSema.hasUnsignedPadding() << 2);
-}
-
-void ASTRecordWriter::AddAPValue(const APValue &Value) {
- APValue::ValueKind Kind = Value.getKind();
- push_back(static_cast<uint64_t>(Kind));
- switch (Kind) {
- case APValue::None:
- case APValue::Indeterminate:
- return;
- case APValue::Int:
- AddAPSInt(Value.getInt());
- return;
- case APValue::Float:
- push_back(static_cast<uint64_t>(
- llvm::APFloatBase::SemanticsToEnum(Value.getFloat().getSemantics())));
- AddAPFloat(Value.getFloat());
- return;
- case APValue::FixedPoint: {
- WriteFixedPointSemantics(*this, Value.getFixedPoint().getSemantics());
- AddAPSInt(Value.getFixedPoint().getValue());
- return;
- }
- case APValue::ComplexInt: {
- AddAPSInt(Value.getComplexIntReal());
- AddAPSInt(Value.getComplexIntImag());
- return;
- }
- case APValue::ComplexFloat: {
- push_back(static_cast<uint64_t>(llvm::APFloatBase::SemanticsToEnum(
- Value.getComplexFloatReal().getSemantics())));
- AddAPFloat(Value.getComplexFloatReal());
- push_back(static_cast<uint64_t>(llvm::APFloatBase::SemanticsToEnum(
- Value.getComplexFloatImag().getSemantics())));
- AddAPFloat(Value.getComplexFloatImag());
- return;
- }
- case APValue::LValue:
- case APValue::Vector:
- case APValue::Array:
- case APValue::Struct:
- case APValue::Union:
- case APValue::MemberPointer:
- case APValue::AddrLabelDiff:
- // TODO : Handle all these APValue::ValueKind.
- return;
- }
- llvm_unreachable("Invalid APValue::ValueKind");
-}
-
void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record) {
Record.push_back(getIdentifierRef(II));
}
@@ -5747,6 +5692,23 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
}
}
+void ASTRecordWriter::AddVarDeclInit(const VarDecl *VD) {
+ const Expr *Init = VD->getInit();
+ if (!Init) {
+ push_back(0);
+ return;
+ }
+
+ unsigned Val = 1;
+ if (EvaluatedStmt *ES = VD->getEvaluatedStmt()) {
+ Val |= (ES->HasConstantInitialization ? 2 : 0);
+ Val |= (ES->HasConstantDestruction ? 4 : 0);
+ // FIXME: Also emit the constant initializer value.
+ }
+ push_back(Val);
+ writeStmtRef(Init);
+}
+
void ASTWriter::ReaderInitialized(ASTReader *Reader) {
assert(Reader && "Cannot remove chain");
assert((!Chain || Chain == Reader) && "Cannot replace chain");
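
The AddVarDeclInit helper introduced above (see the hunk ending at the writeStmtRef call) replaces the old ad-hoc isInitKnownICE encoding with a small flag word: 0 means "no initializer"; otherwise bit 0 is set, bit 1 records constant initialization, and bit 2 records constant destruction. A sketch of just the encoding, with the same bit assignments:

#include <cassert>
#include <cstdint>

// 0 = no initializer; else bit0 = has init, bit1 = constant
// initialization, bit2 = constant destruction.
uint64_t encodeVarInit(bool HasInit, bool ConstInit, bool ConstDtor) {
  if (!HasInit)
    return 0;
  return 1u | (ConstInit ? 2u : 0u) | (ConstDtor ? 4u : 0u);
}

int main() {
  assert(encodeVarInit(false, false, false) == 0);
  assert(encodeVarInit(true, true, false) == 3);
  assert(encodeVarInit(true, true, true) == 7);
}

This matches the three-bit "HasConstant*" abbreviation field in the decl-abbrev hunk further down.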
@@ -6117,8 +6079,9 @@ class OMPClauseWriter : public OMPClauseVisitor<OMPClauseWriter> {
public:
OMPClauseWriter(ASTRecordWriter &Record) : Record(Record) {}
-#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S);
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
+#define GEN_CLANG_CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S);
+#include "llvm/Frontend/OpenMP/OMP.inc"
void writeClause(OMPClause *C);
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
@@ -6583,8 +6546,13 @@ void OMPClauseWriter::VisitOMPToClause(OMPToClause *C) {
Record.push_back(C->getTotalComponentListNum());
Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
+ for (unsigned I = 0; I < NumberOfOMPMotionModifiers; ++I) {
+ Record.push_back(C->getMotionModifier(I));
+ Record.AddSourceLocation(C->getMotionModifierLoc(I));
+ }
Record.AddNestedNameSpecifierLoc(C->getMapperQualifierLoc());
Record.AddDeclarationNameInfo(C->getMapperIdInfo());
+ Record.AddSourceLocation(C->getColonLoc());
for (auto *E : C->varlists())
Record.AddStmt(E);
for (auto *E : C->mapperlists())
@@ -6597,6 +6565,7 @@ void OMPClauseWriter::VisitOMPToClause(OMPToClause *C) {
Record.push_back(N);
for (auto &M : C->all_components()) {
Record.AddStmt(M.getAssociatedExpression());
+ Record.writeBool(M.isNonContiguous());
Record.AddDeclRef(M.getAssociatedDeclaration());
}
}
@@ -6607,8 +6576,13 @@ void OMPClauseWriter::VisitOMPFromClause(OMPFromClause *C) {
Record.push_back(C->getTotalComponentListNum());
Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
+ for (unsigned I = 0; I < NumberOfOMPMotionModifiers; ++I) {
+ Record.push_back(C->getMotionModifier(I));
+ Record.AddSourceLocation(C->getMotionModifierLoc(I));
+ }
Record.AddNestedNameSpecifierLoc(C->getMapperQualifierLoc());
Record.AddDeclarationNameInfo(C->getMapperIdInfo());
+ Record.AddSourceLocation(C->getColonLoc());
for (auto *E : C->varlists())
Record.AddStmt(E);
for (auto *E : C->mapperlists())
@@ -6621,6 +6595,7 @@ void OMPClauseWriter::VisitOMPFromClause(OMPFromClause *C) {
Record.push_back(N);
for (auto &M : C->all_components()) {
Record.AddStmt(M.getAssociatedExpression());
+ Record.writeBool(M.isNonContiguous());
Record.AddDeclRef(M.getAssociatedDeclaration());
}
}
@@ -6773,3 +6748,17 @@ void ASTRecordWriter::writeOMPTraitInfo(const OMPTraitInfo *TI) {
}
}
}
+
+void ASTRecordWriter::writeOMPChildren(OMPChildren *Data) {
+ if (!Data)
+ return;
+ writeUInt32(Data->getNumClauses());
+ writeUInt32(Data->getNumChildren());
+ writeBool(Data->hasAssociatedStmt());
+ for (unsigned I = 0, E = Data->getNumClauses(); I < E; ++I)
+ writeOMPClause(Data->getClauses()[I]);
+ if (Data->hasAssociatedStmt())
+ AddStmt(Data->getAssociatedStmt());
+ for (unsigned I = 0, E = Data->getNumChildren(); I < E; ++I)
+ AddStmt(Data->getChildren()[I]);
+}
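
writeOMPChildren above fixes a single wire order for every OpenMP directive's payload: two 32-bit counts and a bool up front, then all clauses, then the optional associated statement, then the extra child statements. A hedged sketch of that order, with plain integers standing in for real clause and statement nodes:

#include <cassert>
#include <cstdint>
#include <vector>

struct OMPChildrenSketch {
  std::vector<uint64_t> Clauses, Children; // stand-ins, not AST nodes
  bool HasAssociatedStmt = false;
  uint64_t AssociatedStmt = 0;
};

// Counts and flag first, then clauses, optional stmt, extra children.
void writeOMPChildren(std::vector<uint64_t> &Record,
                      const OMPChildrenSketch &Data) {
  Record.push_back(Data.Clauses.size());
  Record.push_back(Data.Children.size());
  Record.push_back(Data.HasAssociatedStmt);
  for (uint64_t C : Data.Clauses)
    Record.push_back(C);
  if (Data.HasAssociatedStmt)
    Record.push_back(Data.AssociatedStmt);
  for (uint64_t S : Data.Children)
    Record.push_back(S);
}

int main() {
  OMPChildrenSketch D{{10, 11}, {20}, true, 99};
  std::vector<uint64_t> Record;
  writeOMPChildren(Record, D);
  assert(Record.size() == 7 && Record[2] == 1 && Record[5] == 99);
}

Centralizing the layout here is what lets the per-directive visitors in ASTWriterStmt.cpp (below) drop their hand-rolled clause and sub-expression loops.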
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index eecdf89c791a..2cb44bf9038b 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -96,6 +96,7 @@ namespace clang {
void VisitFieldDecl(FieldDecl *D);
void VisitMSPropertyDecl(MSPropertyDecl *D);
void VisitMSGuidDecl(MSGuidDecl *D);
+ void VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *D);
void VisitVarDecl(VarDecl *D);
void VisitImplicitParamDecl(ImplicitParamDecl *D);
@@ -556,7 +557,7 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.push_back(D->isDefaulted());
Record.push_back(D->isExplicitlyDefaulted());
Record.push_back(D->hasImplicitReturnZero());
- Record.push_back(D->getConstexprKind());
+ Record.push_back(static_cast<uint64_t>(D->getConstexprKind()));
Record.push_back(D->usesSEHTry());
Record.push_back(D->hasSkippedBody());
Record.push_back(D->isMultiVersion());
@@ -565,7 +566,6 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.AddSourceLocation(D->getEndLoc());
Record.push_back(D->getODRHash());
- Record.push_back(D->usesFPIntrin());
if (D->isDefaulted()) {
if (auto *FDI = D->getDefaultedFunctionInfo()) {
@@ -965,6 +965,12 @@ void ASTDeclWriter::VisitMSGuidDecl(MSGuidDecl *D) {
Code = serialization::DECL_MS_GUID;
}
+void ASTDeclWriter::VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D) {
+ VisitValueDecl(D);
+ Record.AddAPValue(D->getValue());
+ Code = serialization::DECL_TEMPLATE_PARAM_OBJECT;
+}
+
void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
VisitValueDecl(D);
Record.push_back(D->getChainingSize());
@@ -1000,19 +1006,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
}
Record.push_back(D->getLinkageInternal());
- if (D->getInit()) {
- if (!D->isInitKnownICE())
- Record.push_back(1);
- else {
- Record.push_back(
- 2 |
- (D->isInitICE() ? 1 : 0) |
- (D->ensureEvaluatedStmt()->HasConstantDestruction ? 4 : 0));
- }
- Record.AddStmt(D->getInit());
- } else {
- Record.push_back(0);
- }
+ Record.AddVarDeclInit(D);
if (D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) {
BlockVarCopyInit Init = Writer.Context->getBlockVarCopyInit(D);
@@ -1031,8 +1025,10 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
// that module interface unit, not by its users. (Inline variables are
// still emitted in module users.)
ModulesCodegen =
- (Writer.WritingModule->Kind == Module::ModuleInterfaceUnit &&
- Writer.Context->GetGVALinkageForVariable(D) == GVA_StrongExternal);
+ (Writer.WritingModule->Kind == Module::ModuleInterfaceUnit ||
+ (D->hasAttr<DLLExportAttr>() &&
+ Writer.Context->getLangOpts().BuildingPCHWithObjectFile)) &&
+ Writer.Context->GetGVALinkageForVariable(D) == GVA_StrongExternal;
}
Record.push_back(ModulesCodegen);
if (ModulesCodegen)
@@ -1841,29 +1837,20 @@ void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) {
}
void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
- Record.push_back(D->varlist_size());
+ Record.writeOMPChildren(D->Data);
VisitDecl(D);
- for (auto *I : D->varlists())
- Record.AddStmt(I);
Code = serialization::DECL_OMP_THREADPRIVATE;
}
void ASTDeclWriter::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
- Record.push_back(D->varlist_size());
- Record.push_back(D->clauselist_size());
+ Record.writeOMPChildren(D->Data);
VisitDecl(D);
- for (auto *I : D->varlists())
- Record.AddStmt(I);
- for (OMPClause *C : D->clauselists())
- Record.writeOMPClause(C);
Code = serialization::DECL_OMP_ALLOCATE;
}
void ASTDeclWriter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
- Record.push_back(D->clauselist_size());
+ Record.writeOMPChildren(D->Data);
VisitDecl(D);
- for (OMPClause *C : D->clauselists())
- Record.writeOMPClause(C);
Code = serialization::DECL_OMP_REQUIRES;
}
@@ -1882,14 +1869,10 @@ void ASTDeclWriter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
}
void ASTDeclWriter::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
- Record.push_back(D->clauselist_size());
+ Record.writeOMPChildren(D->Data);
VisitValueDecl(D);
- Record.AddSourceLocation(D->getBeginLoc());
- Record.AddStmt(D->getMapperVarRef());
Record.AddDeclarationName(D->getVarName());
Record.AddDeclRef(D->getPrevDeclInScope());
- for (OMPClause *C : D->clauselists())
- Record.writeOMPClause(C);
Code = serialization::DECL_OMP_DECLARE_MAPPER;
}
@@ -2210,7 +2193,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // ImplicitParamKind
Abv->Add(BitCodeAbbrevOp(0)); // EscapingByref
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // IsInitICE (local)
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // HasConstant*
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
// Type Source Info
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
@@ -2357,6 +2340,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
// CastExpr
Abv->Add(BitCodeAbbrevOp(0)); // PathSize
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasFPFeatures
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 6)); // CastKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // PartOfExplicitCast
// ImplicitCastExpr
@@ -2469,7 +2453,10 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
Linkage = Writer->Context->GetGVALinkageForFunction(FD);
ModulesCodegen = *Linkage == GVA_StrongExternal;
}
- if (Writer->Context->getLangOpts().ModulesCodegen) {
+ if (Writer->Context->getLangOpts().ModulesCodegen ||
+ (FD->hasAttr<DLLExportAttr>() &&
+ Writer->Context->getLangOpts().BuildingPCHWithObjectFile)) {
+
// Under -fmodules-codegen, codegen is performed for all non-internal,
// non-always_inline functions, unless they are available elsewhere.
if (!FD->hasAttr<AlwaysInlineAttr>()) {
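
The function-definition hunk above widens the condition for serializing bodies: in addition to -fmodules-codegen, bodies are now emitted for dllexport'ed functions when building a PCH alongside an object file. A one-line sketch of the predicate, with illustrative parameter names:

#include <cassert>

// ModulesCodegen covers the old behavior; the new clause handles
// dllexport + BuildingPCHWithObjectFile.
bool emitBodyForModules(bool ModulesCodegen, bool HasDLLExport,
                        bool BuildingPCHWithObjectFile) {
  return ModulesCodegen || (HasDLLExport && BuildingPCHWithObjectFile);
}

int main() {
  assert(emitBodyForModules(true, false, false));
  assert(emitBodyForModules(false, true, true));
  assert(!emitBodyForModules(false, true, false));
}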
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index 0767b3a24bf2..d4f669ea0183 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -152,6 +152,8 @@ void ASTStmtWriter::VisitIfStmt(IfStmt *S) {
Record.AddStmt(S->getInit());
Record.AddSourceLocation(S->getIfLoc());
+ Record.AddSourceLocation(S->getLParenLoc());
+ Record.AddSourceLocation(S->getRParenLoc());
if (HasElse)
Record.AddSourceLocation(S->getElseLoc());
@@ -175,6 +177,8 @@ void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
Record.AddDeclRef(S->getConditionVariable());
Record.AddSourceLocation(S->getSwitchLoc());
+ Record.AddSourceLocation(S->getLParenLoc());
+ Record.AddSourceLocation(S->getRParenLoc());
for (SwitchCase *SC = S->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase())
@@ -848,12 +852,15 @@ void ASTStmtWriter::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
+ Record.push_back(E->hasStoredFPFeatures());
Record.AddSourceLocation(E->getRParenLoc());
Record.AddStmt(E->getCallee());
for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
Arg != ArgEnd; ++Arg)
Record.AddStmt(*Arg);
Record.push_back(static_cast<unsigned>(E->getADLCallKind()));
+ if (E->hasStoredFPFeatures())
+ Record.push_back(E->getFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_CALL;
}
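
VisitCallExpr above establishes the layout the reader hunks earlier rely on: the argument count is immediately followed by a HasStoredFPFeatures flag, and the opaque FP-options word, when present, trails the record after the arguments. A sketch of that conditional trailing payload:

#include <cassert>
#include <cstdint>
#include <vector>

// Count, flag, arguments, then the FP-options word only when the flag
// says it is there.
void writeCall(std::vector<uint64_t> &Record,
               const std::vector<uint64_t> &Args, bool HasFP, uint64_t FP) {
  Record.push_back(Args.size());
  Record.push_back(HasFP);
  for (uint64_t A : Args)
    Record.push_back(A);
  if (HasFP)
    Record.push_back(FP);
}

int main() {
  std::vector<uint64_t> R;
  writeCall(R, {1, 2}, true, 0xF00D);
  assert(R.size() == 5 && R.back() == 0xF00D);
}

The same reasoning explains the VisitImplicitCastExpr change below: the compact abbreviation can only be used when neither an inheritance path nor stored FP features would extend the record.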
@@ -939,12 +946,16 @@ void ASTStmtWriter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
Record.push_back(E->path_size());
+ Record.push_back(E->hasStoredFPFeatures());
Record.AddStmt(E->getSubExpr());
Record.push_back(E->getCastKind()); // FIXME: stable encoding
for (CastExpr::path_iterator
PI = E->path_begin(), PE = E->path_end(); PI != PE; ++PI)
Record.AddCXXBaseSpecifier(**PI);
+
+ if (E->hasStoredFPFeatures())
+ Record.push_back(E->getFPFeatures().getAsOpaqueInt());
}
void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
@@ -996,7 +1007,7 @@ void ASTStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
Record.push_back(E->isPartOfExplicitCast());
- if (E->path_size() == 0)
+ if (E->path_size() == 0 && !E->hasStoredFPFeatures())
AbbrevToUse = Writer.getExprImplicitCastAbbrev();
Code = serialization::EXPR_IMPLICIT_CAST;
@@ -1550,7 +1561,6 @@ void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
Record.AddSourceRange(E->Range);
- Record.push_back(E->getFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_CXX_OPERATOR_CALL;
}
@@ -1890,7 +1900,7 @@ ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
void
ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
VisitExpr(E);
- Record.push_back(E->arg_size());
+ Record.push_back(E->getNumArgs());
for (CXXUnresolvedConstructExpr::arg_iterator
ArgI = E->arg_begin(), ArgE = E->arg_end(); ArgI != ArgE; ++ArgI)
Record.AddStmt(*ArgI);
@@ -2008,6 +2018,7 @@ void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
Record.AddDeclRef(E->getParameter());
+ Record.push_back(E->isReferenceParameter());
Record.AddSourceLocation(E->getNameLoc());
Record.AddStmt(E->getReplacement());
Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM;
@@ -2051,6 +2062,7 @@ void ASTStmtWriter::VisitCXXFoldExpr(CXXFoldExpr *E) {
Record.push_back(E->NumExpansions);
Record.AddStmt(E->SubExprs[0]);
Record.AddStmt(E->SubExprs[1]);
+ Record.AddStmt(E->SubExprs[2]);
Record.push_back(E->Opcode);
Code = serialization::EXPR_CXX_FOLD;
}
@@ -2157,85 +2169,23 @@ void ASTStmtWriter::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
//===----------------------------------------------------------------------===//
// OpenMP Directives.
//===----------------------------------------------------------------------===//
+
void ASTStmtWriter::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
+ Record.writeOMPChildren(E->Data);
Record.AddSourceLocation(E->getBeginLoc());
Record.AddSourceLocation(E->getEndLoc());
- for (unsigned i = 0; i < E->getNumClauses(); ++i) {
- Record.writeOMPClause(E->getClause(i));
- }
- if (E->hasAssociatedStmt())
- Record.AddStmt(E->getAssociatedStmt());
}
void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
- Record.push_back(D->getCollapsedNumber());
+ Record.writeUInt32(D->getCollapsedNumber());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getIterationVariable());
- Record.AddStmt(D->getLastIteration());
- Record.AddStmt(D->getCalcLastIteration());
- Record.AddStmt(D->getPreCond());
- Record.AddStmt(D->getCond());
- Record.AddStmt(D->getInit());
- Record.AddStmt(D->getInc());
- Record.AddStmt(D->getPreInits());
- if (isOpenMPWorksharingDirective(D->getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(D->getDirectiveKind()) ||
- isOpenMPDistributeDirective(D->getDirectiveKind())) {
- Record.AddStmt(D->getIsLastIterVariable());
- Record.AddStmt(D->getLowerBoundVariable());
- Record.AddStmt(D->getUpperBoundVariable());
- Record.AddStmt(D->getStrideVariable());
- Record.AddStmt(D->getEnsureUpperBound());
- Record.AddStmt(D->getNextLowerBound());
- Record.AddStmt(D->getNextUpperBound());
- Record.AddStmt(D->getNumIterations());
- }
- if (isOpenMPLoopBoundSharingDirective(D->getDirectiveKind())) {
- Record.AddStmt(D->getPrevLowerBoundVariable());
- Record.AddStmt(D->getPrevUpperBoundVariable());
- Record.AddStmt(D->getDistInc());
- Record.AddStmt(D->getPrevEnsureUpperBound());
- Record.AddStmt(D->getCombinedLowerBoundVariable());
- Record.AddStmt(D->getCombinedUpperBoundVariable());
- Record.AddStmt(D->getCombinedEnsureUpperBound());
- Record.AddStmt(D->getCombinedInit());
- Record.AddStmt(D->getCombinedCond());
- Record.AddStmt(D->getCombinedNextLowerBound());
- Record.AddStmt(D->getCombinedNextUpperBound());
- Record.AddStmt(D->getCombinedDistCond());
- Record.AddStmt(D->getCombinedParForInDistCond());
- }
- for (auto I : D->counters()) {
- Record.AddStmt(I);
- }
- for (auto I : D->private_counters()) {
- Record.AddStmt(I);
- }
- for (auto I : D->inits()) {
- Record.AddStmt(I);
- }
- for (auto I : D->updates()) {
- Record.AddStmt(I);
- }
- for (auto I : D->finals()) {
- Record.AddStmt(I);
- }
- for (Stmt *S : D->dependent_counters())
- Record.AddStmt(S);
- for (Stmt *S : D->dependent_inits())
- Record.AddStmt(S);
- for (Stmt *S : D->finals_conditions())
- Record.AddStmt(S);
}
void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
}
@@ -2246,8 +2196,7 @@ void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_FOR_DIRECTIVE;
}
@@ -2258,23 +2207,20 @@ void ASTStmtWriter::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
void ASTStmtWriter::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_SECTIONS_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPSectionDirective(OMPSectionDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_SECTION_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPSingleDirective(OMPSingleDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_SINGLE_DIRECTIVE;
}
@@ -2287,7 +2233,6 @@ void ASTStmtWriter::VisitOMPMasterDirective(OMPMasterDirective *D) {
void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Record.AddDeclarationNameInfo(D->getDirectiveName());
Code = serialization::STMT_OMP_CRITICAL_DIRECTIVE;
@@ -2295,8 +2240,7 @@ void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
void ASTStmtWriter::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_PARALLEL_FOR_DIRECTIVE;
}
@@ -2309,53 +2253,41 @@ void ASTStmtWriter::VisitOMPParallelForSimdDirective(
void ASTStmtWriter::VisitOMPParallelMasterDirective(
OMPParallelMasterDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
Code = serialization::STMT_OMP_PARALLEL_MASTER_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTaskDirective(OMPTaskDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TASK_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getX());
- Record.AddStmt(D->getV());
- Record.AddStmt(D->getExpr());
- Record.AddStmt(D->getUpdateExpr());
- Record.push_back(D->isXLHSInRHSPart() ? 1 : 0);
- Record.push_back(D->isPostfixUpdate() ? 1 : 0);
+ Record.writeBool(D->isXLHSInRHSPart());
+ Record.writeBool(D->isPostfixUpdate());
Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTargetDirective(OMPTargetDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TARGET_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TARGET_DATA_DIRECTIVE;
}
@@ -2363,7 +2295,6 @@ void ASTStmtWriter::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
void ASTStmtWriter::VisitOMPTargetEnterDataDirective(
OMPTargetEnterDataDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TARGET_ENTER_DATA_DIRECTIVE;
}
@@ -2371,7 +2302,6 @@ void ASTStmtWriter::VisitOMPTargetEnterDataDirective(
void ASTStmtWriter::VisitOMPTargetExitDataDirective(
OMPTargetExitDataDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TARGET_EXIT_DATA_DIRECTIVE;
}
@@ -2379,9 +2309,7 @@ void ASTStmtWriter::VisitOMPTargetExitDataDirective(
void ASTStmtWriter::VisitOMPTargetParallelDirective(
OMPTargetParallelDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TARGET_PARALLEL_DIRECTIVE;
}
@@ -2389,8 +2317,7 @@ void ASTStmtWriter::VisitOMPTargetParallelDirective(
void ASTStmtWriter::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
VisitOMPLoopDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TARGET_PARALLEL_FOR_DIRECTIVE;
}
@@ -2414,43 +2341,36 @@ void ASTStmtWriter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
void ASTStmtWriter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.AddStmt(D->getReductionRef());
Code = serialization::STMT_OMP_TASKGROUP_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPFlushDirective(OMPFlushDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_FLUSH_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPDepobjDirective(OMPDepobjDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_DEPOBJ_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPScanDirective(OMPScanDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_SCAN_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_ORDERED_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TEAMS_DIRECTIVE;
}
@@ -2459,21 +2379,20 @@ void ASTStmtWriter::VisitOMPCancellationPointDirective(
OMPCancellationPointDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- Record.push_back(uint64_t(D->getCancelRegion()));
+ Record.writeEnum(D->getCancelRegion());
Code = serialization::STMT_OMP_CANCELLATION_POINT_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPCancelDirective(OMPCancelDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
- Record.push_back(uint64_t(D->getCancelRegion()));
+ Record.writeEnum(D->getCancelRegion());
Code = serialization::STMT_OMP_CANCEL_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TASKLOOP_DIRECTIVE;
}
@@ -2485,7 +2404,7 @@ void ASTStmtWriter::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
void ASTStmtWriter::VisitOMPMasterTaskLoopDirective(
OMPMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_MASTER_TASKLOOP_DIRECTIVE;
}
@@ -2498,7 +2417,7 @@ void ASTStmtWriter::VisitOMPMasterTaskLoopSimdDirective(
void ASTStmtWriter::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE;
}
@@ -2515,7 +2434,6 @@ void ASTStmtWriter::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
void ASTStmtWriter::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TARGET_UPDATE_DIRECTIVE;
}
@@ -2523,8 +2441,7 @@ void ASTStmtWriter::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
void ASTStmtWriter::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
@@ -2572,14 +2489,12 @@ void ASTStmtWriter::VisitOMPTeamsDistributeParallelForSimdDirective(
void ASTStmtWriter::VisitOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTargetTeamsDirective(OMPTargetTeamsDirective *D) {
VisitStmt(D);
- Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TARGET_TEAMS_DIRECTIVE;
}
@@ -2593,8 +2508,7 @@ void ASTStmtWriter::VisitOMPTargetTeamsDistributeDirective(
void ASTStmtWriter::VisitOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
- Record.AddStmt(D->getTaskReductionRefExpr());
- Record.push_back(D->hasCancel() ? 1 : 0);
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
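
Across the statement-writer hunks above, push_back(X ? 1 : 0) and push_back(uint64_t(Enum)) give way to writeBool/writeEnum/writeUInt32. A minimal sketch of such typed helpers over a plain record vector, to show why the call sites shrink while the on-disk encoding stays the same:

#include <cassert>
#include <cstdint>
#include <vector>

struct RecordSketch {
  std::vector<uint64_t> Data;
  void writeBool(bool B) { Data.push_back(B); }
  template <typename E> void writeEnum(E V) {
    Data.push_back(static_cast<uint64_t>(V));
  }
  void writeUInt32(uint32_t V) { Data.push_back(V); }
};

enum class CancelRegion { Parallel = 1, For = 2 };

int main() {
  RecordSketch R;
  R.writeBool(true);
  R.writeEnum(CancelRegion::For);
  R.writeUInt32(4);
  assert(R.Data == std::vector<uint64_t>({1, 2, 4}));
}

The typed names also make the writer self-documenting, so the matching reader can be audited field by field.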
diff --git a/clang/lib/Serialization/GlobalModuleIndex.cpp b/clang/lib/Serialization/GlobalModuleIndex.cpp
index 9192b3b476bb..52ce17d984bf 100644
--- a/clang/lib/Serialization/GlobalModuleIndex.cpp
+++ b/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -905,7 +905,7 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
}
// The output buffer, into which the global index will be written.
- SmallVector<char, 16> OutputBuffer;
+ SmallString<16> OutputBuffer;
{
llvm::BitstreamWriter OutputStream(OutputBuffer);
if (Builder.writeIndex(OutputStream))
@@ -913,9 +913,8 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
"failed writing index");
}
- return llvm::writeFileAtomically(
- (IndexPath + "-%%%%%%%%").str(), IndexPath,
- llvm::StringRef(OutputBuffer.data(), OutputBuffer.size()));
+ return llvm::writeFileAtomically((IndexPath + "-%%%%%%%%").str(), IndexPath,
+ OutputBuffer);
}
namespace {
diff --git a/clang/lib/Serialization/ModuleManager.cpp b/clang/lib/Serialization/ModuleManager.cpp
index a42ed2f3c179..40ffa6cfee8f 100644
--- a/clang/lib/Serialization/ModuleManager.cpp
+++ b/clang/lib/Serialization/ModuleManager.cpp
@@ -112,7 +112,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// Look for the file entry. This only fails if the expected size or
// modification time differ.
- const FileEntry *Entry;
+ OptionalFileEntryRefDegradesToFileEntryPtr Entry;
if (Type == MK_ExplicitModule || Type == MK_PrebuiltModule) {
// If we're not expecting to pull this file out of the module cache, it
// might have a different mtime due to being moved across filesystems in
@@ -132,15 +132,38 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
return Missing;
}
+ // The ModuleManager's use of FileEntry nodes as the keys for its map of
+ // loaded modules is less than ideal. Uniqueness for FileEntry nodes is
+ // maintained by FileManager, which in turn uses inode numbers on hosts
+ // that support that. When coupled with the module cache's proclivity for
+ // turning over and deleting stale PCMs, this means entries for different
+ // module files can wind up reusing the same underlying inode. When this
+ // happens, subsequent accesses to the Modules map will disagree on the
+ // ModuleFile associated with a given file. In general, it is not sufficient
+ // to resolve this conundrum with a type like FileEntryRef that stores the
+ // name of the FileEntry node on first access because of path canonicalization
+ // issues. However, the paths constructed for implicit module builds are
+ // fully under Clang's control. We *can*, therefore, rely on their structure
+ // being consistent across operating systems and across subsequent accesses
+ // to the Modules map.
+ auto implicitModuleNamesMatch = [](ModuleKind Kind, const ModuleFile *MF,
+ const FileEntry *Entry) -> bool {
+ if (Kind != MK_ImplicitModule)
+ return true;
+ return Entry->getName() == MF->FileName;
+ };
+
// Check whether we already loaded this module, before
if (ModuleFile *ModuleEntry = Modules.lookup(Entry)) {
- // Check the stored signature.
- if (checkSignature(ModuleEntry->Signature, ExpectedSignature, ErrorStr))
- return OutOfDate;
-
- Module = ModuleEntry;
- updateModuleImports(*ModuleEntry, ImportedBy, ImportLoc);
- return AlreadyLoaded;
+ if (implicitModuleNamesMatch(Type, ModuleEntry, Entry)) {
+ // Check the stored signature.
+ if (checkSignature(ModuleEntry->Signature, ExpectedSignature, ErrorStr))
+ return OutOfDate;
+
+ Module = ModuleEntry;
+ updateModuleImports(*ModuleEntry, ImportedBy, ImportLoc);
+ return AlreadyLoaded;
+ }
}
// Allocate a new module.
@@ -265,7 +288,7 @@ void ModuleManager::removeModules(ModuleIterator First, ModuleMap *modMap) {
if (modMap) {
StringRef ModuleName = victim->ModuleName;
if (Module *mod = modMap->findModule(ModuleName)) {
- mod->setASTFile(nullptr);
+ mod->setASTFile(None);
}
}
}
@@ -435,23 +458,21 @@ void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
returnVisitState(State);
}
-bool ModuleManager::lookupModuleFile(StringRef FileName,
- off_t ExpectedSize,
+bool ModuleManager::lookupModuleFile(StringRef FileName, off_t ExpectedSize,
time_t ExpectedModTime,
- const FileEntry *&File) {
- if (FileName == "-") {
- File = nullptr;
+ Optional<FileEntryRef> &File) {
+ File = None;
+ if (FileName == "-")
return false;
- }
// Open the file immediately to ensure there is no race between stat'ing and
// opening the file.
- auto FileOrErr = FileMgr.getFile(FileName, /*OpenFile=*/true,
- /*CacheFailure=*/false);
- if (!FileOrErr) {
- File = nullptr;
+ Optional<FileEntryRef> FileOrErr =
+ expectedToOptional(FileMgr.getFileRef(FileName, /*OpenFile=*/true,
+ /*CacheFailure=*/false));
+ if (!FileOrErr)
return false;
- }
+
File = *FileOrErr;
if ((ExpectedSize && ExpectedSize != File->getSize()) ||
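
The lookupModuleFile rewrite above changes the out-parameter from a raw FileEntry pointer to Optional<FileEntryRef>: it starts empty, "-" (stdin) is rejected without being treated as a mismatch, and the entry is populated only when the open succeeds. A sketch of that contract using std::optional and std::string stand-ins; openFile is a hypothetical stub, and the size-check semantics are inferred from the truncated hunk:

#include <cassert>
#include <optional>
#include <string>

// Hypothetical stand-in for FileMgr.getFileRef(...).
static std::optional<std::string> openFile(const std::string &Name) {
  if (Name.empty())
    return std::nullopt;
  return Name; // pretend the open succeeded
}

// Returns true only on a size mismatch, keeping File populated so a
// caller can still name it in diagnostics.
bool lookupModuleFile(const std::string &FileName, long ExpectedSize,
                      std::optional<std::string> &File) {
  File.reset();
  if (FileName == "-")
    return false;
  std::optional<std::string> FileOrErr = openFile(FileName);
  if (!FileOrErr)
    return false;
  File = *FileOrErr;
  return ExpectedSize && ExpectedSize != (long)File->size();
}

int main() {
  std::optional<std::string> File;
  assert(!lookupModuleFile("-", 0, File) && !File);
  assert(!lookupModuleFile("mod.pcm", 7, File) && File);
}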
diff --git a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 918c6e361381..a86a410ebcbc 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -978,8 +978,7 @@ void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
ProgramStateRef State = C.getState();
// Check if this is the branch for the end of the loop.
- SVal CollectionSentinel = C.getSVal(FCS);
- if (CollectionSentinel.isZeroConstant()) {
+ if (!ExprEngine::hasMoreIteration(State, FCS, C.getLocationContext())) {
if (!alreadyExecutedAtLeastOneLoopIteration(C.getPredecessor(), FCS))
State = assumeCollectionNonEmpty(C, State, FCS, /*Assumption*/false);
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
index 1ef70b650414..131c1345af99 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
@@ -106,7 +106,7 @@ static const NoteTag *getNoteTag(CheckerContext &C,
QualType CastToTy, const Expr *Object,
bool CastSucceeds, bool IsKnownCast) {
std::string CastToName =
- CastInfo ? CastInfo->to()->getPointeeCXXRecordDecl()->getNameAsString()
+ CastInfo ? CastInfo->to()->getAsCXXRecordDecl()->getNameAsString()
: CastToTy->getPointeeCXXRecordDecl()->getNameAsString();
Object = Object->IgnoreParenImpCasts();
@@ -119,10 +119,10 @@ static const NoteTag *getNoteTag(CheckerContext &C,
Out << "Assuming ";
if (const auto *DRE = dyn_cast<DeclRefExpr>(Object)) {
- Out << '\'' << DRE->getDecl()->getNameAsString() << '\'';
+ Out << '\'' << DRE->getDecl()->getDeclName() << '\'';
} else if (const auto *ME = dyn_cast<MemberExpr>(Object)) {
Out << (IsKnownCast ? "Field '" : "field '")
- << ME->getMemberDecl()->getNameAsString() << '\'';
+ << ME->getMemberDecl()->getDeclName() << '\'';
} else {
Out << (IsKnownCast ? "The object" : "the object");
}
@@ -135,6 +135,47 @@ static const NoteTag *getNoteTag(CheckerContext &C,
/*IsPrunable=*/true);
}
+static const NoteTag *getNoteTag(CheckerContext &C,
+ SmallVector<QualType, 4> CastToTyVec,
+ const Expr *Object,
+ bool IsKnownCast) {
+ Object = Object->IgnoreParenImpCasts();
+
+ return C.getNoteTag(
+ [=]() -> std::string {
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+
+ if (!IsKnownCast)
+ Out << "Assuming ";
+
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Object)) {
+ Out << '\'' << DRE->getDecl()->getNameAsString() << '\'';
+ } else if (const auto *ME = dyn_cast<MemberExpr>(Object)) {
+ Out << (IsKnownCast ? "Field '" : "field '")
+ << ME->getMemberDecl()->getNameAsString() << '\'';
+ } else {
+ Out << (IsKnownCast ? "The object" : "the object");
+ }
+ Out << " is";
+
+ bool First = true;
+ for (QualType CastToTy: CastToTyVec) {
+ std::string CastToName =
+ CastToTy->getAsCXXRecordDecl() ?
+ CastToTy->getAsCXXRecordDecl()->getNameAsString() :
+ CastToTy->getPointeeCXXRecordDecl()->getNameAsString();
+ Out << ' ' << ((CastToTyVec.size() == 1) ? "not" :
+ (First ? "neither" : "nor")) << " a '" << CastToName
+ << '\'';
+ First = false;
+ }
+
+ return std::string(Out.str());
+ },
+ /*IsPrunable=*/true);
+}
+
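
The note text produced by this loop reads "is not a 'T'" for a single candidate type and chains "neither ... nor" for a template pack. A minimal stand-alone sketch of that joining logic (plain C++ with a hypothetical joinCastNames helper, not the checker's code):

#include <iostream>
#include <string>
#include <vector>

// Mirrors the CastToTyVec loop above: one type yields "not a 'T'",
// several yield "neither a 'T' nor a 'U' nor ...".
static std::string joinCastNames(const std::vector<std::string> &Names) {
  std::string Out = "is";
  bool First = true;
  for (const std::string &Name : Names) {
    Out += ' ';
    Out += (Names.size() == 1) ? "not" : (First ? "neither" : "nor");
    Out += " a '" + Name + "'";
    First = false;
  }
  return Out;
}

int main() {
  std::cout << joinCastNames({"Shape"}) << '\n';            // is not a 'Shape'
  std::cout << joinCastNames({"Circle", "Square"}) << '\n'; // is neither a 'Circle' nor a 'Square'
}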
//===----------------------------------------------------------------------===//
// Main logic to evaluate a cast.
//===----------------------------------------------------------------------===//
@@ -220,40 +261,76 @@ static void addInstanceOfTransition(const CallEvent &Call,
bool IsInstanceOf) {
const FunctionDecl *FD = Call.getDecl()->getAsFunction();
QualType CastFromTy = Call.parameters()[0]->getType();
- QualType CastToTy = FD->getTemplateSpecializationArgs()->get(0).getAsType();
- if (CastFromTy->isPointerType())
- CastToTy = C.getASTContext().getPointerType(CastToTy);
- else if (CastFromTy->isReferenceType())
- CastToTy = alignReferenceTypes(CastToTy, CastFromTy, C.getASTContext());
- else
- return;
+ SmallVector<QualType, 4> CastToTyVec;
+ for (unsigned idx = 0; idx < FD->getTemplateSpecializationArgs()->size() - 1;
+ ++idx) {
+ TemplateArgument CastToTempArg =
+ FD->getTemplateSpecializationArgs()->get(idx);
+ switch (CastToTempArg.getKind()) {
+ default:
+ return;
+ case TemplateArgument::Type:
+ CastToTyVec.push_back(CastToTempArg.getAsType());
+ break;
+ case TemplateArgument::Pack:
+ for (TemplateArgument ArgInPack: CastToTempArg.pack_elements())
+ CastToTyVec.push_back(ArgInPack.getAsType());
+ break;
+ }
+ }
const MemRegion *MR = DV.getAsRegion();
- const DynamicCastInfo *CastInfo =
- getDynamicCastInfo(State, MR, CastFromTy, CastToTy);
+ if (MR && CastFromTy->isReferenceType())
+ MR = State->getSVal(DV.castAs<Loc>()).getAsRegion();
+
+ bool Success = false;
+ bool IsAnyKnown = false;
+ for (QualType CastToTy: CastToTyVec) {
+ if (CastFromTy->isPointerType())
+ CastToTy = C.getASTContext().getPointerType(CastToTy);
+ else if (CastFromTy->isReferenceType())
+ CastToTy = alignReferenceTypes(CastToTy, CastFromTy, C.getASTContext());
+ else
+ return;
- bool CastSucceeds;
- if (CastInfo)
- CastSucceeds = IsInstanceOf && CastInfo->succeeds();
- else
- CastSucceeds = IsInstanceOf || CastFromTy == CastToTy;
+ const DynamicCastInfo *CastInfo =
+ getDynamicCastInfo(State, MR, CastFromTy, CastToTy);
- if (isInfeasibleCast(CastInfo, CastSucceeds)) {
- C.generateSink(State, C.getPredecessor());
- return;
+ bool CastSucceeds;
+ if (CastInfo)
+ CastSucceeds = IsInstanceOf && CastInfo->succeeds();
+ else
+ CastSucceeds = IsInstanceOf || CastFromTy == CastToTy;
+
+ // Store the type and the cast information.
+ bool IsKnownCast = CastInfo || CastFromTy == CastToTy;
+ IsAnyKnown = IsAnyKnown || IsKnownCast;
+ ProgramStateRef NewState = State;
+ if (!IsKnownCast)
+ NewState = setDynamicTypeAndCastInfo(State, MR, CastFromTy, CastToTy,
+ IsInstanceOf);
+
+ if (CastSucceeds) {
+ Success = true;
+ C.addTransition(
+ NewState->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ C.getSValBuilder().makeTruthVal(true)),
+ getNoteTag(C, CastInfo, CastToTy, Call.getArgExpr(0), true,
+ IsKnownCast));
+ if (IsKnownCast)
+ return;
+ } else if (CastInfo && CastInfo->succeeds()) {
+ C.generateSink(NewState, C.getPredecessor());
+ return;
+ }
}
- // Store the type and the cast information.
- bool IsKnownCast = CastInfo || CastFromTy == CastToTy;
- if (!IsKnownCast)
- State = setDynamicTypeAndCastInfo(State, MR, CastFromTy, CastToTy,
- IsInstanceOf);
-
- C.addTransition(
- State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
- C.getSValBuilder().makeTruthVal(CastSucceeds)),
- getNoteTag(C, CastInfo, CastToTy, Call.getArgExpr(0), CastSucceeds,
- IsKnownCast));
+ if (!Success) {
+ C.addTransition(
+ State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ C.getSValBuilder().makeTruthVal(false)),
+ getNoteTag(C, CastToTyVec, Call.getArgExpr(0), IsAnyKnown));
+ }
}
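
This hunk generalizes the modeling from a single template argument to a parameter pack, so a call such as llvm::isa<Circle, Square>(V) is treated as succeeding when V is any one of the listed types; only when every candidate fails does the call evaluate to false. A plain-C++ illustration of those semantics (hypothetical types, with dynamic_cast as a stand-in for the analyzer's cast tracking):

#include <iostream>

struct Base { virtual ~Base() = default; };
struct Circle : Base {};
struct Square : Base {};

// Succeeds if the object is any one of the listed types; the all-fail
// case corresponds to the checker's final 'false' transition.
template <class... Ts> static bool isAnyOf(const Base &B) {
  return (... || (dynamic_cast<const Ts *>(&B) != nullptr));
}

int main() {
  Square S;
  std::cout << isAnyOf<Circle>(S) << '\n';         // 0: not a 'Circle'
  std::cout << isAnyOf<Circle, Square>(S) << '\n'; // 1: it is a 'Square'
}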
//===----------------------------------------------------------------------===//
@@ -402,8 +479,9 @@ bool CastValueChecker::evalCall(const CallEvent &Call,
QualType ParamT = Call.parameters()[0]->getType();
QualType ResultT = Call.getResultType();
if (!(ParamT->isPointerType() && ResultT->isPointerType()) &&
- !(ParamT->isReferenceType() && ResultT->isReferenceType()))
+ !(ParamT->isReferenceType() && ResultT->isReferenceType())) {
return false;
+ }
DV = Call.getArgSVal(0).getAs<DefinedOrUnknownSVal>();
break;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 13836f08a61e..78b3c209ad6b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -406,7 +406,7 @@ ProgramStateRef ObjCDeallocChecker::evalAssume(ProgramStateRef State, SVal Cond,
if (State->get<UnreleasedIvarMap>().isEmpty())
return State;
- auto *CondBSE = dyn_cast_or_null<BinarySymExpr>(Cond.getAsSymExpr());
+ auto *CondBSE = dyn_cast_or_null<BinarySymExpr>(Cond.getAsSymbol());
if (!CondBSE)
return State;
diff --git a/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 03b7cbd1c833..7cdd78b8adfb 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -131,21 +131,21 @@ bool ento::shouldRegisterLiveVariablesDumper(const CheckerManager &mgr) {
//===----------------------------------------------------------------------===//
namespace {
-class LiveStatementsDumper : public Checker<check::ASTCodeBody> {
+class LiveExpressionsDumper : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
BugReporter &BR) const {
if (LiveVariables *L = Mgr.getAnalysis<RelaxedLiveVariables>(D))
- L->dumpStmtLiveness(Mgr.getSourceManager());
+ L->dumpExprLiveness(Mgr.getSourceManager());
}
};
}
-void ento::registerLiveStatementsDumper(CheckerManager &mgr) {
- mgr.registerChecker<LiveStatementsDumper>();
+void ento::registerLiveExpressionsDumper(CheckerManager &mgr) {
+ mgr.registerChecker<LiveExpressionsDumper>();
}
-bool ento::shouldRegisterLiveStatementsDumper(const CheckerManager &mgr) {
+bool ento::shouldRegisterLiveExpressionsDumper(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 2411f0e2d058..adfc2f8cb8fe 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -30,10 +30,14 @@ class DereferenceChecker
: public Checker< check::Location,
check::Bind,
EventDispatcher<ImplicitNullDerefEvent> > {
- mutable std::unique_ptr<BuiltinBug> BT_null;
- mutable std::unique_ptr<BuiltinBug> BT_undef;
+ enum DerefKind { NullPointer, UndefinedPointerValue };
- void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C) const;
+ BugType BT_Null{this, "Dereference of null pointer", categories::LogicError};
+ BugType BT_Undef{this, "Dereference of undefined pointer value",
+ categories::LogicError};
+
+ void reportBug(DerefKind K, ProgramStateRef State, const Stmt *S,
+ CheckerContext &C) const;
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
@@ -116,18 +120,29 @@ static bool isDeclRefExprToReference(const Expr *E) {
return false;
}
-void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
- CheckerContext &C) const {
+void DereferenceChecker::reportBug(DerefKind K, ProgramStateRef State,
+ const Stmt *S, CheckerContext &C) const {
+ const BugType *BT = nullptr;
+ llvm::StringRef DerefStr1;
+ llvm::StringRef DerefStr2;
+ switch (K) {
+ case DerefKind::NullPointer:
+ BT = &BT_Null;
+ DerefStr1 = " results in a null pointer dereference";
+ DerefStr2 = " results in a dereference of a null pointer";
+ break;
+ case DerefKind::UndefinedPointerValue:
+ BT = &BT_Undef;
+ DerefStr1 = " results in an undefined pointer dereference";
+ DerefStr2 = " results in a dereference of an undefined pointer value";
+ break;
+ };
+
// Generate an error node.
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
- // We know that 'location' cannot be non-null. This is what
- // we call an "explicit" null dereference.
- if (!BT_null)
- BT_null.reset(new BuiltinBug(this, "Dereference of null pointer"));
-
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
@@ -139,7 +154,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
State.get(), N->getLocationContext());
- os << " results in a null pointer dereference";
+ os << DerefStr1;
break;
}
case Stmt::OMPArraySectionExprClass: {
@@ -147,11 +162,11 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
const OMPArraySectionExpr *AE = cast<OMPArraySectionExpr>(S);
AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
State.get(), N->getLocationContext());
- os << " results in a null pointer dereference";
+ os << DerefStr1;
break;
}
case Stmt::UnaryOperatorClass: {
- os << "Dereference of null pointer";
+ os << BT->getDescription();
const UnaryOperator *U = cast<UnaryOperator>(S);
AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
State.get(), N->getLocationContext(), true);
@@ -160,8 +175,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
case Stmt::MemberExprClass: {
const MemberExpr *M = cast<MemberExpr>(S);
if (M->isArrow() || isDeclRefExprToReference(M->getBase())) {
- os << "Access to field '" << M->getMemberNameInfo()
- << "' results in a dereference of a null pointer";
+ os << "Access to field '" << M->getMemberNameInfo() << "'" << DerefStr2;
AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
State.get(), N->getLocationContext(), true);
}
@@ -169,8 +183,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
}
case Stmt::ObjCIvarRefExprClass: {
const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
- os << "Access to instance variable '" << *IV->getDecl()
- << "' results in a dereference of a null pointer";
+ os << "Access to instance variable '" << *IV->getDecl() << "'" << DerefStr2;
AddDerefSource(os, Ranges, IV->getBase()->IgnoreParenCasts(),
State.get(), N->getLocationContext(), true);
break;
@@ -180,7 +193,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
}
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_null, buf.empty() ? BT_null->getDescription() : StringRef(buf), N);
+ *BT, buf.empty() ? BT->getDescription() : StringRef(buf), N);
bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
@@ -195,16 +208,9 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
CheckerContext &C) const {
// Check for dereference of an undefined value.
if (l.isUndef()) {
- if (ExplodedNode *N = C.generateErrorNode()) {
- if (!BT_undef)
- BT_undef.reset(
- new BuiltinBug(this, "Dereference of undefined pointer value"));
-
- auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_undef, BT_undef->getDescription(), N);
- bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
- C.emitReport(std::move(report));
- }
+ const Expr *DerefExpr = getDereferenceExpr(S);
+ if (!suppressReport(DerefExpr))
+ reportBug(DerefKind::UndefinedPointerValue, C.getState(), DerefExpr, C);
return;
}
@@ -219,12 +225,13 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
ProgramStateRef notNullState, nullState;
std::tie(notNullState, nullState) = state->assume(location);
- // The explicit NULL case.
if (nullState) {
if (!notNullState) {
+ // We know that 'location' can only be null. This is what
+ // we call an "explicit" null dereference.
const Expr *expr = getDereferenceExpr(S);
if (!suppressReport(expr)) {
- reportBug(nullState, expr, C);
+ reportBug(DerefKind::NullPointer, nullState, expr, C);
return;
}
}
@@ -266,7 +273,7 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (!StNonNull) {
const Expr *expr = getDereferenceExpr(S, /*IsBind=*/true);
if (!suppressReport(expr)) {
- reportBug(StNull, expr, C);
+ reportBug(DerefKind::NullPointer, StNull, expr, C);
return;
}
}
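
The refactor replaces two lazily allocated BuiltinBug members with eagerly initialized BugTypes and routes both diagnostics through one kind-dispatched reportBug. The shape of that dispatch, reduced to stand-in types (a sketch, not the analyzer API):

#include <cstdio>

enum DerefKind { NullPointer, UndefinedPointerValue };

struct BugType { const char *Desc; };

static const BugType BT_Null{"Dereference of null pointer"};
static const BugType BT_Undef{"Dereference of undefined pointer value"};

// One report routine selects the bug type and message fragment from the
// kind, instead of duplicating the selection at every call site.
static void reportBug(DerefKind K) {
  const BugType *BT = nullptr;
  const char *DerefStr = nullptr;
  switch (K) {
  case NullPointer:
    BT = &BT_Null;
    DerefStr = " results in a null pointer dereference";
    break;
  case UndefinedPointerValue:
    BT = &BT_Undef;
    DerefStr = " results in an undefined pointer dereference";
    break;
  }
  std::printf("%s:%s\n", BT->Desc, DerefStr);
}

int main() {
  reportBug(NullPointer);
  reportBug(UndefinedPointerValue);
}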
diff --git a/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 4225d890c47a..c0167b53ae26 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//
#include "Taint.h"
+#include "clang/Analysis/IssueHash.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Checkers/SValExplainer.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
@@ -326,7 +326,7 @@ void ExprInspectionChecker::analyzerHashDump(const CallExpr *CE,
const SourceManager &SM = C.getSourceManager();
FullSourceLoc FL(CE->getArg(0)->getBeginLoc(), SM);
std::string HashContent =
- GetIssueString(SM, FL, getCheckerName().getName(), "Category",
+ getIssueString(FL, getCheckerName().getName(), "Category",
C.getLocationContext()->getDecl(), Opts);
reportBug(HashContent, C);
diff --git a/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
index fc35082705fa..e3f4be0726c8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
@@ -20,29 +20,39 @@
// Art:
//
//
-// +-+---------v-+ +------------+
-// acquire_func succeeded | | Escape | |
-// +-----------------> Allocated +---------> Escaped <--+
-// | | | | | |
-// | +-----+------++ +------------+ |
-// | | | |
-// | release_func | +--+ |
-// | | | handle +--------+ |
-// | | | dies | | |
-// | +----v-----+ +---------> Leaked | |
-// | | | |(REPORT)| |
-// +----------+--+ | Released | Escape +--------+ |
-// | | | +---------------------------+
-// | Not tracked <--+ +----+---+-+
-// | | | | | As argument by value
-// +------+------+ | release_func | +------+ in function call
-// | | | | or by reference in
-// | | | | use_func call
-// +---------+ +----v-----+ | +-----------+
-// acquire_func failed | Double | +-----> Use after |
-// | released | | released |
-// | (REPORT) | | (REPORT) |
-// +----------+ +-----------+
+// +-------------+ +------------+
+// acquire_func succeeded | | Escape | |
+// +-----------------> Allocated +---------> Escaped <--+
+// | | | | | |
+// | +-----+------++ +------------+ |
+// | | | |
+// acquire_func | release_func | +--+ |
+// failed | | | handle +--------+ |
+// +---------+ | | | dies | | |
+// | | | +----v-----+ +---------> Leaked | |
+// | | | | | |(REPORT)| |
+// | +----------+--+ | Released | Escape +--------+ |
+// | | | | +---------------------------+
+// +--> Not tracked | +----+---+-+
+// | | | | As argument by value
+// +----------+--+ release_func | +------+ in function call
+// | | | or by reference in
+// | | | use_func call
+// unowned | +----v-----+ | +-----------+
+// acquire_func | | Double | +-----> Use after |
+// succeeded | | released | | released |
+// | | (REPORT) | | (REPORT) |
+// +---------------+ +----------+ +-----------+
+// | Allocated |
+// | Unowned | release_func
+// | +---------+
+// +---------------+ |
+// |
+// +-----v----------+
+// | Release of |
+// | unowned handle |
+// | (REPORT) |
+// +----------------+
//
// acquire_func represents the functions or syscalls that may acquire a handle.
// release_func represents the functions or syscalls that may release a handle.
@@ -53,7 +63,7 @@
//
// Note that the analyzer does not always know for sure if a function failed

// or succeeded. In those cases we use the state MaybeAllocated.
-// Thus, the diagramm above captures the intent, not implementation details.
+// Thus, the diagram above captures the intent, not implementation details.
//
// Due to the fact that the number of handle related syscalls in Fuchsia
// is large, we adopt the annotation attributes to describe syscalls'
@@ -102,7 +112,7 @@ static const StringRef ErrorTypeName = "zx_status_t";
class HandleState {
private:
- enum class Kind { MaybeAllocated, Allocated, Released, Escaped } K;
+ enum class Kind { MaybeAllocated, Allocated, Released, Escaped, Unowned } K;
SymbolRef ErrorSym;
HandleState(Kind K, SymbolRef ErrorSym) : K(K), ErrorSym(ErrorSym) {}
@@ -114,6 +124,7 @@ public:
bool maybeAllocated() const { return K == Kind::MaybeAllocated; }
bool isReleased() const { return K == Kind::Released; }
bool isEscaped() const { return K == Kind::Escaped; }
+ bool isUnowned() const { return K == Kind::Unowned; }
static HandleState getMaybeAllocated(SymbolRef ErrorSym) {
return HandleState(Kind::MaybeAllocated, ErrorSym);
@@ -131,6 +142,9 @@ public:
static HandleState getEscaped() {
return HandleState(Kind::Escaped, nullptr);
}
+ static HandleState getUnowned() {
+ return HandleState(Kind::Unowned, nullptr);
+ }
SymbolRef getErrorSym() const { return ErrorSym; }
@@ -149,6 +163,7 @@ public:
CASE(Kind::Allocated)
CASE(Kind::Released)
CASE(Kind::Escaped)
+ CASE(Kind::Unowned)
}
if (ErrorSym) {
OS << " ErrorSym: ";
@@ -163,6 +178,11 @@ template <typename Attr> static bool hasFuchsiaAttr(const Decl *D) {
return D->hasAttr<Attr>() && D->getAttr<Attr>()->getHandleType() == "Fuchsia";
}
+template <typename Attr> static bool hasFuchsiaUnownedAttr(const Decl *D) {
+ return D->hasAttr<Attr>() &&
+ D->getAttr<Attr>()->getHandleType() == "FuchsiaUnowned";
+}
+
class FuchsiaHandleChecker
: public Checker<check::PostCall, check::PreCall, check::DeadSymbols,
check::PointerEscape, eval::Assume> {
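
The new handle kind is driven by the same acquire_handle annotation, with the "FuchsiaUnowned" argument matched above. Hypothetical annotated declarations sketching the intended usage (assumed spellings, not taken from this commit):

// Stand-in typedefs; the checker matches the typedef name 'zx_handle_t'.
typedef int zx_handle_t;
typedef int zx_status_t;

// The returned handle is unowned: the checker now flags releasing it.
zx_handle_t zx_thread_self() __attribute__((acquire_handle("FuchsiaUnowned")));

zx_status_t zx_handle_close(
    zx_handle_t handle __attribute__((release_handle("Fuchsia"))));

void bad() {
  zx_handle_close(zx_thread_self()); // "Releasing an unowned handle" (REPORT)
}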
@@ -172,6 +192,8 @@ class FuchsiaHandleChecker
"Fuchsia Handle Error"};
BugType UseAfterReleaseBugType{this, "Fuchsia handle use after release",
"Fuchsia Handle Error"};
+ BugType ReleaseUnownedBugType{
+ this, "Fuchsia handle release of unowned handle", "Fuchsia Handle Error"};
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -190,6 +212,9 @@ public:
void reportDoubleRelease(SymbolRef HandleSym, const SourceRange &Range,
CheckerContext &C) const;
+ void reportUnownedRelease(SymbolRef HandleSym, const SourceRange &Range,
+ CheckerContext &C) const;
+
void reportUseAfterFree(SymbolRef HandleSym, const SourceRange &Range,
CheckerContext &C) const;
@@ -226,32 +251,70 @@ static const ExplodedNode *getAcquireSite(const ExplodedNode *N, SymbolRef Sym,
return nullptr;
}
-/// Returns the symbols extracted from the argument or null if it cannot be
-/// found.
-static SymbolRef getFuchsiaHandleSymbol(QualType QT, SVal Arg,
- ProgramStateRef State) {
+namespace {
+class FuchsiaHandleSymbolVisitor final : public SymbolVisitor {
+public:
+ FuchsiaHandleSymbolVisitor(ProgramStateRef State) : State(std::move(State)) {}
+ ProgramStateRef getState() const { return State; }
+
+ bool VisitSymbol(SymbolRef S) override {
+ if (const auto *HandleType = S->getType()->getAs<TypedefType>())
+ if (HandleType->getDecl()->getName() == HandleTypeName)
+ Symbols.push_back(S);
+ return true;
+ }
+
+ SmallVector<SymbolRef, 1024> GetSymbols() { return Symbols; }
+
+private:
+ SmallVector<SymbolRef, 1024> Symbols;
+ ProgramStateRef State;
+};
+} // end anonymous namespace
+
+/// Returns the symbols extracted from the argument, or an empty vector if
+/// none can be found. It is unlikely to have over 1024 symbols in one argument.
+static SmallVector<SymbolRef, 1024>
+getFuchsiaHandleSymbols(QualType QT, SVal Arg, ProgramStateRef State) {
int PtrToHandleLevel = 0;
while (QT->isAnyPointerType() || QT->isReferenceType()) {
++PtrToHandleLevel;
QT = QT->getPointeeType();
}
+ if (QT->isStructureType()) {
+ // If we see a structure, see if there is any handle referenced by the
+ // structure.
+ FuchsiaHandleSymbolVisitor Visitor(State);
+ State->scanReachableSymbols(Arg, Visitor);
+ return Visitor.GetSymbols();
+ }
if (const auto *HandleType = QT->getAs<TypedefType>()) {
if (HandleType->getDecl()->getName() != HandleTypeName)
- return nullptr;
- if (PtrToHandleLevel > 1) {
+ return {};
+ if (PtrToHandleLevel > 1)
// Not supported yet.
- return nullptr;
- }
+ return {};
if (PtrToHandleLevel == 0) {
- return Arg.getAsSymbol();
+ SymbolRef Sym = Arg.getAsSymbol();
+ if (Sym) {
+ return {Sym};
+ } else {
+ return {};
+ }
} else {
assert(PtrToHandleLevel == 1);
- if (Optional<Loc> ArgLoc = Arg.getAs<Loc>())
- return State->getSVal(*ArgLoc).getAsSymbol();
+ if (Optional<Loc> ArgLoc = Arg.getAs<Loc>()) {
+ SymbolRef Sym = State->getSVal(*ArgLoc).getAsSymbol();
+ if (Sym) {
+ return {Sym};
+ } else {
+ return {};
+ }
+ }
}
}
- return nullptr;
+ return {};
}
void FuchsiaHandleChecker::checkPreCall(const CallEvent &Call,
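
A by-value struct argument may wrap several handles, which is why one argument now yields a vector of symbols rather than a single SymbolRef. Hypothetical declarations illustrating the case the new structure scan covers:

// Stand-in typedef; the symbol visitor above matches this typedef name.
typedef int zx_handle_t;

struct two_handles {
  zx_handle_t first;
  zx_handle_t second;
};

void use_pair(struct two_handles p); // both p.first and p.second are tracked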
@@ -273,31 +336,27 @@ void FuchsiaHandleChecker::checkPreCall(const CallEvent &Call,
if (Arg >= FuncDecl->getNumParams())
break;
const ParmVarDecl *PVD = FuncDecl->getParamDecl(Arg);
- SymbolRef Handle =
- getFuchsiaHandleSymbol(PVD->getType(), Call.getArgSVal(Arg), State);
- if (!Handle)
- continue;
+ SmallVector<SymbolRef, 1024> Handles =
+ getFuchsiaHandleSymbols(PVD->getType(), Call.getArgSVal(Arg), State);
// Handled in checkPostCall.
if (hasFuchsiaAttr<ReleaseHandleAttr>(PVD) ||
hasFuchsiaAttr<AcquireHandleAttr>(PVD))
continue;
- const HandleState *HState = State->get<HStateMap>(Handle);
- if (!HState || HState->isEscaped())
- continue;
+ for (SymbolRef Handle : Handles) {
+ const HandleState *HState = State->get<HStateMap>(Handle);
+ if (!HState || HState->isEscaped())
+ continue;
- if (hasFuchsiaAttr<UseHandleAttr>(PVD) || PVD->getType()->isIntegerType()) {
- if (HState->isReleased()) {
- reportUseAfterFree(Handle, Call.getArgSourceRange(Arg), C);
- return;
+ if (hasFuchsiaAttr<UseHandleAttr>(PVD) ||
+ PVD->getType()->isIntegerType()) {
+ if (HState->isReleased()) {
+ reportUseAfterFree(Handle, Call.getArgSourceRange(Arg), C);
+ return;
+ }
}
}
- if (!hasFuchsiaAttr<UseHandleAttr>(PVD) &&
- PVD->getType()->isIntegerType()) {
- // Working around integer by-value escapes.
- State = State->set<HStateMap>(Handle, HandleState::getEscaped());
- }
}
C.addTransition(State);
}
@@ -308,6 +367,10 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
if (!FuncDecl)
return;
+ // If we analyzed the function body, then ignore the annotations.
+ if (C.wasInlined)
+ return;
+
ProgramStateRef State = C.getState();
std::vector<std::function<std::string(BugReport & BR)>> Notes;
@@ -324,7 +387,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
if (auto IsInteresting = PathBR->getInterestingnessKind(RetSym)) {
std::string SBuf;
llvm::raw_string_ostream OS(SBuf);
- OS << "Function '" << FuncDecl->getNameAsString()
+ OS << "Function '" << FuncDecl->getDeclName()
<< "' returns an open handle";
return OS.str();
} else
@@ -332,6 +395,21 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
});
State =
State->set<HStateMap>(RetSym, HandleState::getMaybeAllocated(nullptr));
+ } else if (hasFuchsiaUnownedAttr<AcquireHandleAttr>(FuncDecl)) {
+ // Function returns an unowned handle
+ SymbolRef RetSym = Call.getReturnValue().getAsSymbol();
+ Notes.push_back([RetSym, FuncDecl](BugReport &BR) -> std::string {
+ auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
+ if (auto IsInteresting = PathBR->getInterestingnessKind(RetSym)) {
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Function '" << FuncDecl->getDeclName()
+ << "' returns an unowned handle";
+ return OS.str();
+ } else
+ return "";
+ });
+ State = State->set<HStateMap>(RetSym, HandleState::getUnowned());
}
for (unsigned Arg = 0; Arg < Call.getNumArgs(); ++Arg) {
@@ -339,63 +417,88 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
break;
const ParmVarDecl *PVD = FuncDecl->getParamDecl(Arg);
unsigned ParamDiagIdx = PVD->getFunctionScopeIndex() + 1;
- SymbolRef Handle =
- getFuchsiaHandleSymbol(PVD->getType(), Call.getArgSVal(Arg), State);
- if (!Handle)
- continue;
+ SmallVector<SymbolRef, 1024> Handles =
+ getFuchsiaHandleSymbols(PVD->getType(), Call.getArgSVal(Arg), State);
- const HandleState *HState = State->get<HStateMap>(Handle);
- if (HState && HState->isEscaped())
- continue;
- if (hasFuchsiaAttr<ReleaseHandleAttr>(PVD)) {
- if (HState && HState->isReleased()) {
- reportDoubleRelease(Handle, Call.getArgSourceRange(Arg), C);
- return;
- } else {
+ for (SymbolRef Handle : Handles) {
+ const HandleState *HState = State->get<HStateMap>(Handle);
+ if (HState && HState->isEscaped())
+ continue;
+ if (hasFuchsiaAttr<ReleaseHandleAttr>(PVD)) {
+ if (HState && HState->isReleased()) {
+ reportDoubleRelease(Handle, Call.getArgSourceRange(Arg), C);
+ return;
+ } else if (HState && HState->isUnowned()) {
+ reportUnownedRelease(Handle, Call.getArgSourceRange(Arg), C);
+ return;
+ } else {
+ Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
+ auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
+ if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Handle released through " << ParamDiagIdx
+ << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
+ return OS.str();
+ } else
+ return "";
+ });
+ State = State->set<HStateMap>(Handle, HandleState::getReleased());
+ }
+ } else if (hasFuchsiaAttr<AcquireHandleAttr>(PVD)) {
Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
std::string SBuf;
llvm::raw_string_ostream OS(SBuf);
- OS << "Handle released through " << ParamDiagIdx
+ OS << "Handle allocated through " << ParamDiagIdx
<< llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
return OS.str();
} else
return "";
});
- State = State->set<HStateMap>(Handle, HandleState::getReleased());
+ State = State->set<HStateMap>(
+ Handle, HandleState::getMaybeAllocated(ResultSymbol));
+ } else if (hasFuchsiaUnownedAttr<AcquireHandleAttr>(PVD)) {
+ Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
+ auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
+ if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Unowned handle allocated through " << ParamDiagIdx
+ << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
+ return OS.str();
+ } else
+ return "";
+ });
+ State = State->set<HStateMap>(Handle, HandleState::getUnowned());
+ } else if (!hasFuchsiaAttr<UseHandleAttr>(PVD) &&
+ PVD->getType()->isIntegerType()) {
+ // Working around integer by-value escapes.
+ // The by-value escape would not be captured in checkPointerEscape.
+ // If the function was not analyzed (otherwise wasInlined should be
+ // true) and there is no annotation on the handle, we assume the handle
+ // is escaped.
+ State = State->set<HStateMap>(Handle, HandleState::getEscaped());
}
- } else if (hasFuchsiaAttr<AcquireHandleAttr>(PVD)) {
- Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
- auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
- if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
- std::string SBuf;
- llvm::raw_string_ostream OS(SBuf);
- OS << "Handle allocated through " << ParamDiagIdx
- << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
- return OS.str();
- } else
- return "";
- });
- State = State->set<HStateMap>(
- Handle, HandleState::getMaybeAllocated(ResultSymbol));
}
}
const NoteTag *T = nullptr;
if (!Notes.empty()) {
T = C.getNoteTag([this, Notes{std::move(Notes)}](
PathSensitiveBugReport &BR) -> std::string {
- if (&BR.getBugType() != &UseAfterReleaseBugType &&
- &BR.getBugType() != &LeakBugType &&
- &BR.getBugType() != &DoubleReleaseBugType)
- return "";
- for (auto &Note : Notes) {
- std::string Text = Note(BR);
- if (!Text.empty())
- return Text;
- }
- return "";
- });
+ if (&BR.getBugType() != &UseAfterReleaseBugType &&
+ &BR.getBugType() != &LeakBugType &&
+ &BR.getBugType() != &DoubleReleaseBugType &&
+ &BR.getBugType() != &ReleaseUnownedBugType)
+ return "";
+ for (auto &Note : Notes) {
+ std::string Text = Note(BR);
+ if (!Text.empty())
+ return Text;
+ }
+ return "";
+ });
}
C.addTransition(State, T);
}
@@ -481,13 +584,14 @@ ProgramStateRef FuchsiaHandleChecker::checkPointerEscape(
if (Arg >= FuncDecl->getNumParams())
break;
const ParmVarDecl *PVD = FuncDecl->getParamDecl(Arg);
- SymbolRef Handle =
- getFuchsiaHandleSymbol(PVD->getType(), Call->getArgSVal(Arg), State);
- if (!Handle)
- continue;
- if (hasFuchsiaAttr<UseHandleAttr>(PVD) ||
- hasFuchsiaAttr<ReleaseHandleAttr>(PVD))
- UnEscaped.insert(Handle);
+ SmallVector<SymbolRef, 1024> Handles =
+ getFuchsiaHandleSymbols(PVD->getType(), Call->getArgSVal(Arg), State);
+ for (SymbolRef Handle : Handles) {
+ if (hasFuchsiaAttr<UseHandleAttr>(PVD) ||
+ hasFuchsiaAttr<ReleaseHandleAttr>(PVD)) {
+ UnEscaped.insert(Handle);
+ }
+ }
}
}
@@ -525,6 +629,14 @@ void FuchsiaHandleChecker::reportDoubleRelease(SymbolRef HandleSym,
"Releasing a previously released handle");
}
+void FuchsiaHandleChecker::reportUnownedRelease(SymbolRef HandleSym,
+ const SourceRange &Range,
+ CheckerContext &C) const {
+ ExplodedNode *ErrNode = C.generateErrorNode(C.getState());
+ reportBug(HandleSym, ErrNode, C, &Range, ReleaseUnownedBugType,
+ "Releasing an unowned handle");
+}
+
void FuchsiaHandleChecker::reportUseAfterFree(SymbolRef HandleSym,
const SourceRange &Range,
CheckerContext &C) const {
diff --git a/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index c06d2fcd8e7d..42c777eb2c52 100644
--- a/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -103,6 +103,9 @@ private:
struct FunctionData {
FunctionData() = delete;
+ FunctionData(const FunctionDecl *FDecl, StringRef Name,
+ std::string FullName)
+ : FDecl(FDecl), Name(Name), FullName(std::move(FullName)) {}
FunctionData(const FunctionData &) = default;
FunctionData(FunctionData &&) = default;
FunctionData &operator=(const FunctionData &) = delete;
@@ -123,7 +126,7 @@ private:
if (Name.empty() || FullName.empty())
return None;
- return FunctionData{FDecl, Name, FullName};
+ return FunctionData{FDecl, Name, std::move(FullName)};
}
bool isInScope(StringRef Scope) const {
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index fd8cbd694b24..ab5e6a1c9991 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -109,7 +109,7 @@ class IteratorModeling
bool Postfix) const;
void handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
OverloadedOperatorKind Op, const SVal &RetVal,
- const SVal &LHS, const SVal &RHS) const;
+ const SVal &Iterator, const SVal &Amount) const;
void handlePtrIncrOrDecr(CheckerContext &C, const Expr *Iterator,
OverloadedOperatorKind OK, SVal Offset) const;
void handleAdvance(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
@@ -262,18 +262,30 @@ void IteratorModeling::checkPostStmt(const UnaryOperator *UO,
void IteratorModeling::checkPostStmt(const BinaryOperator *BO,
CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- BinaryOperatorKind OK = BO->getOpcode();
- SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+ const ProgramStateRef State = C.getState();
+ const BinaryOperatorKind OK = BO->getOpcode();
+ const Expr *const LHS = BO->getLHS();
+ const Expr *const RHS = BO->getRHS();
+ const SVal LVal = State->getSVal(LHS, C.getLocationContext());
+ const SVal RVal = State->getSVal(RHS, C.getLocationContext());
if (isSimpleComparisonOperator(BO->getOpcode())) {
- SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
SVal Result = State->getSVal(BO, C.getLocationContext());
handleComparison(C, BO, Result, LVal, RVal,
BinaryOperator::getOverloadedOperator(OK));
} else if (isRandomIncrOrDecrOperator(OK)) {
- handlePtrIncrOrDecr(C, BO->getLHS(),
- BinaryOperator::getOverloadedOperator(OK), RVal);
+ // In case of operator+ the iterator can be either on the LHS (e.g. it + 1),
+ // or on the RHS (e.g. 1 + it). Both cases are modeled.
+ const bool IsIterOnLHS = BO->getLHS()->getType()->isPointerType();
+ const Expr *const &IterExpr = IsIterOnLHS ? LHS : RHS;
+ const Expr *const &AmountExpr = IsIterOnLHS ? RHS : LHS;
+
+ // The non-iterator side must have an integral or enumeration type.
+ if (!AmountExpr->getType()->isIntegralOrEnumerationType())
+ return;
+ const SVal &AmountVal = IsIterOnLHS ? RVal : LVal;
+ handlePtrIncrOrDecr(C, IterExpr, BinaryOperator::getOverloadedOperator(OK),
+ AmountVal);
}
}
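
A plain-C++ example of the two forms this hunk now models; both expressions denote the same position, but only the first was handled before this change:

#include <vector>

int main() {
  std::vector<int> v{1, 2, 3};
  auto a = v.begin() + 1; // iterator on the LHS: it + 1
  auto b = 1 + v.begin(); // iterator on the RHS: 1 + it
  return a == b ? 0 : 1;  // 0: same position either way
}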
@@ -366,11 +378,24 @@ IteratorModeling::handleOverloadedOperator(CheckerContext &C,
InstCall->getCXXThisVal(), Call.getArgSVal(0));
return;
}
- } else {
- if (Call.getNumArgs() >= 2 &&
- Call.getArgExpr(1)->getType()->isIntegralOrEnumerationType()) {
+ } else if (Call.getNumArgs() >= 2) {
+ const Expr *FirstArg = Call.getArgExpr(0);
+ const Expr *SecondArg = Call.getArgExpr(1);
+ const QualType FirstType = FirstArg->getType();
+ const QualType SecondType = SecondArg->getType();
+
+ if (FirstType->isIntegralOrEnumerationType() ||
+ SecondType->isIntegralOrEnumerationType()) {
+ // In case of operator+ the iterator can be either on the LHS (e.g.
+ // it + 1), or on the RHS (e.g. 1 + it). Both cases are modeled.
+ const bool IsIterFirst = FirstType->isStructureOrClassType();
+ const SVal FirstArg = Call.getArgSVal(0);
+ const SVal SecondArg = Call.getArgSVal(1);
+ const SVal &Iterator = IsIterFirst ? FirstArg : SecondArg;
+ const SVal &Amount = IsIterFirst ? SecondArg : FirstArg;
+
handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
- Call.getArgSVal(0), Call.getArgSVal(1));
+ Iterator, Amount);
return;
}
}
@@ -461,6 +486,12 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
RPos = getIteratorPosition(State, RVal);
}
+ // If the value for which we just tried to set a new iterator position is
+ // an `SVal` for which no iterator position can be set, then the setting was
+ // unsuccessful. We cannot handle the comparison in this case.
+ if (!LPos || !RPos)
+ return;
+
// We cannot make assumptions on `UnknownVal`. Let us conjure a symbol
// instead.
if (RetVal.isUnknown()) {
@@ -556,35 +587,35 @@ void IteratorModeling::handleDecrement(CheckerContext &C, const SVal &RetVal,
C.addTransition(State);
}
-void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C,
- const Expr *CE,
+void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
OverloadedOperatorKind Op,
const SVal &RetVal,
- const SVal &LHS,
- const SVal &RHS) const {
+ const SVal &Iterator,
+ const SVal &Amount) const {
// Increment or decrement the symbolic expressions which represents the
// position of the iterator
auto State = C.getState();
- const auto *Pos = getIteratorPosition(State, LHS);
+ const auto *Pos = getIteratorPosition(State, Iterator);
if (!Pos)
return;
- const auto *value = &RHS;
- SVal val;
- if (auto loc = RHS.getAs<Loc>()) {
- val = State->getRawSVal(*loc);
- value = &val;
+ const auto *Value = &Amount;
+ SVal Val;
+ if (auto LocAmount = Amount.getAs<Loc>()) {
+ Val = State->getRawSVal(*LocAmount);
+ Value = &Val;
}
- auto &TgtVal = (Op == OO_PlusEqual || Op == OO_MinusEqual) ? LHS : RetVal;
+ const auto &TgtVal =
+ (Op == OO_PlusEqual || Op == OO_MinusEqual) ? Iterator : RetVal;
// `AdvancedState` is a state where the position of `LHS` is advanced. We
// only need this state to retrieve the new position, but we do not want
// to change the position of `LHS` (in every case).
- auto AdvancedState = advancePosition(State, LHS, Op, *value);
+ auto AdvancedState = advancePosition(State, Iterator, Op, *Value);
if (AdvancedState) {
- const auto *NewPos = getIteratorPosition(AdvancedState, LHS);
+ const auto *NewPos = getIteratorPosition(AdvancedState, Iterator);
assert(NewPos &&
"Iterator should have position after successful advancement");
@@ -599,6 +630,9 @@ void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
const Expr *Iterator,
OverloadedOperatorKind OK,
SVal Offset) const {
+ if (!Offset.getAs<DefinedSVal>())
+ return;
+
QualType PtrType = Iterator->getType();
if (!PtrType->isPointerType())
return;
@@ -612,13 +646,11 @@ void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
return;
SVal NewVal;
- if (OK == OO_Plus || OK == OO_PlusEqual)
+ if (OK == OO_Plus || OK == OO_PlusEqual) {
NewVal = State->getLValue(ElementType, Offset, OldVal);
- else {
- const llvm::APSInt &OffsetInt =
- Offset.castAs<nonloc::ConcreteInt>().getValue();
- auto &BVF = C.getSymbolManager().getBasicVals();
- SVal NegatedOffset = nonloc::ConcreteInt(BVF.getValue(-OffsetInt));
+ } else {
+ auto &SVB = C.getSValBuilder();
+ SVal NegatedOffset = SVB.evalMinus(Offset.castAs<NonLoc>());
NewVal = State->getLValue(ElementType, NegatedOffset, OldVal);
}
@@ -684,9 +716,14 @@ bool IteratorModeling::noChangeInAdvance(CheckerContext &C, SVal Iter,
const auto StateBefore = N->getState();
const auto *PosBefore = getIteratorPosition(StateBefore, Iter);
-
- assert(PosBefore && "`std::advance() should not create new iterator "
- "position but change existing ones");
+ // FIXME: `std::advance()` should not create a new iterator position but
+ // change existing ones. However, in case of iterators implemented as
+ // pointers the handling of parameters in `std::advance()`-like
+ // functions is still incomplete which may result in cases where
+ // the new position is assigned to the wrong pointer. This causes
+ // crash if we use an assertion here.
+ if (!PosBefore)
+ return false;
return PosBefore->getOffset() == PosAfter->getOffset();
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
index df8e379d1f20..dd014648eb6f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
@@ -169,6 +169,8 @@ void IteratorRangeChecker::checkPreStmt(const BinaryOperator *BO,
verifyDereference(C, LVal);
} else if (isRandomIncrOrDecrOperator(OK)) {
SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+ if (!BO->getRHS()->getType()->isIntegralOrEnumerationType())
+ return;
verifyRandomIncrOrDecr(C, BinaryOperator::getOverloadedOperator(OK), LVal,
RVal);
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 252377f24bd7..28d3e058fee2 100644
--- a/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -1141,10 +1141,9 @@ void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
SE = Mgr.getSourceManager().getSLocEntry(SLInfo.first);
}
- bool Invalid = false;
- const llvm::MemoryBuffer *BF =
- Mgr.getSourceManager().getBuffer(SLInfo.first, SL, &Invalid);
- if (Invalid)
+ llvm::Optional<llvm::MemoryBufferRef> BF =
+ Mgr.getSourceManager().getBufferOrNone(SLInfo.first, SL);
+ if (!BF)
return;
Lexer TheLexer(SL, LangOptions(), BF->getBufferStart(),
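
The old getBuffer interface signaled failure through a bool out-parameter; getBufferOrNone folds that into the return type. The shape of the new API, sketched with std::optional and a stand-in function (an assumed simplification, not the SourceManager signature):

#include <optional>
#include <string>

// Failure is encoded in the return value rather than in a separate
// 'Invalid' out-parameter, so the check and the use cannot drift apart.
static std::optional<std::string> getBufferOrNone(bool Ok) {
  if (!Ok)
    return std::nullopt;
  return std::string("file contents");
}

int main() {
  if (std::optional<std::string> BF = getBufferOrNone(true))
    return BF->empty() ? 1 : 0; // use *BF only after the check
  return 1;
}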
diff --git a/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 87477e96d2d1..a157ee2da5df 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -509,7 +509,7 @@ ProgramStateRef MacOSKeychainAPIChecker::evalAssume(ProgramStateRef State,
if (AMap.isEmpty())
return State;
- auto *CondBSE = dyn_cast_or_null<BinarySymExpr>(Cond.getAsSymExpr());
+ auto *CondBSE = dyn_cast_or_null<BinarySymExpr>(Cond.getAsSymbol());
if (!CondBSE)
return State;
BinaryOperator::Opcode OpCode = CondBSE->getOpcode();
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index d5b0a5b2220f..f117d5505ecb 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -3110,11 +3110,6 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
return true;
}
- if (FName == "postEvent" &&
- FD->getQualifiedNameAsString() == "QCoreApplication::postEvent") {
- return true;
- }
-
if (FName == "connectImpl" &&
FD->getQualifiedNameAsString() == "QObject::connectImpl") {
return true;
@@ -3301,14 +3296,16 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
OS << "reallocated by call to '";
const Stmt *S = RSCurr->getStmt();
if (const auto *MemCallE = dyn_cast<CXXMemberCallExpr>(S)) {
- OS << MemCallE->getMethodDecl()->getNameAsString();
+ OS << MemCallE->getMethodDecl()->getDeclName();
} else if (const auto *OpCallE = dyn_cast<CXXOperatorCallExpr>(S)) {
- OS << OpCallE->getDirectCallee()->getNameAsString();
+ OS << OpCallE->getDirectCallee()->getDeclName();
} else if (const auto *CallE = dyn_cast<CallExpr>(S)) {
auto &CEMgr = BRC.getStateManager().getCallEventManager();
CallEventRef<> Call = CEMgr.getSimpleCall(CallE, state, CurrentLC);
- const auto *D = dyn_cast_or_null<NamedDecl>(Call->getDecl());
- OS << (D ? D->getNameAsString() : "unknown");
+ if (const auto *D = dyn_cast_or_null<NamedDecl>(Call->getDecl()))
+ OS << D->getDeclName();
+ else
+ OS << "unknown";
}
OS << "'";
StackHint = std::make_unique<StackHintGeneratorForSymbol>(
diff --git a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index 7f0519c695b0..a38298a7abed 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -104,7 +104,7 @@ private:
"basic_ios",
"future",
"optional",
- "packaged_task"
+ "packaged_task",
"promise",
"shared_future",
"shared_lock",
@@ -580,7 +580,7 @@ void MoveChecker::explainObject(llvm::raw_ostream &OS, const MemRegion *MR,
if (const auto DR =
dyn_cast_or_null<DeclRegion>(unwrapRValueReferenceIndirection(MR))) {
const auto *RegionDecl = cast<NamedDecl>(DR->getDecl());
- OS << " '" << RegionDecl->getNameAsString() << "'";
+ OS << " '" << RegionDecl->getDeclName() << "'";
}
ObjectKind OK = classifyObject(MR, RD);
diff --git a/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp b/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
index 53ed0e187a4c..270b66dab020 100644
--- a/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
@@ -24,32 +24,36 @@ using namespace ento;
using namespace ast_matchers;
namespace {
-
-const char *WarnAtNode = "OSObjCast";
+static constexpr const char *const WarnAtNode = "WarnAtNode";
+static constexpr const char *const WarnRecordDecl = "WarnRecordDecl";
class OSObjectCStyleCastChecker : public Checker<check::ASTCodeBody> {
public:
- void checkASTCodeBody(const Decl *D,
- AnalysisManager &AM,
+ void checkASTCodeBody(const Decl *D, AnalysisManager &AM,
BugReporter &BR) const;
};
+}
static void emitDiagnostics(const BoundNodes &Nodes,
BugReporter &BR,
AnalysisDeclContext *ADC,
const OSObjectCStyleCastChecker *Checker) {
const auto *CE = Nodes.getNodeAs<CastExpr>(WarnAtNode);
- assert(CE);
+ const CXXRecordDecl *RD = Nodes.getNodeAs<CXXRecordDecl>(WarnRecordDecl);
+ assert(CE && RD);
std::string Diagnostics;
llvm::raw_string_ostream OS(Diagnostics);
- OS << "C-style cast of OSObject. Use OSDynamicCast instead.";
+ OS << "C-style cast of an OSObject is prone to type confusion attacks; "
+ << "use 'OSRequiredCast' if the object is definitely of type '"
+ << RD->getNameAsString() << "', or 'OSDynamicCast' followed by "
+ << "a null check if unsure",
BR.EmitBasicReport(
ADC->getDecl(),
Checker,
/*Name=*/"OSObject C-Style Cast",
- /*BugCategory=*/"Security",
+ categories::SecurityError,
OS.str(),
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), ADC),
CE->getSourceRange());
@@ -68,7 +72,7 @@ void OSObjectCStyleCastChecker::checkASTCodeBody(const Decl *D, AnalysisManager
auto OSObjTypeM = hasTypePointingTo(cxxRecordDecl(isDerivedFrom("OSMetaClassBase")));
auto OSObjSubclassM = hasTypePointingTo(
- cxxRecordDecl(isDerivedFrom("OSObject")));
+ cxxRecordDecl(isDerivedFrom("OSObject")).bind(WarnRecordDecl));
auto CastM = cStyleCastExpr(
allOf(hasSourceExpression(allOf(OSObjTypeM, unless(DynamicCastM))),
@@ -78,7 +82,6 @@ void OSObjectCStyleCastChecker::checkASTCodeBody(const Decl *D, AnalysisManager
for (BoundNodes Match : Matches)
emitDiagnostics(Match, BR, ADC, this);
}
-}
void ento::registerOSObjectCStyleCast(CheckerManager &Mgr) {
Mgr.registerChecker<OSObjectCStyleCastChecker>();
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 24e2a4dea922..35a600f2d7b8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -21,7 +21,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -74,7 +74,7 @@ private:
void initializeSelectors(ASTContext &Ctx) const;
void fillSelectors(ASTContext &Ctx, ArrayRef<SelectorDescriptor> Sel,
StringRef ClassName) const;
- mutable llvm::StringMap<llvm::SmallSet<Selector, 16> > SelectorsForClass;
+ mutable llvm::StringMap<llvm::SmallPtrSet<Selector, 16>> SelectorsForClass;
mutable bool IsInitialized;
};
@@ -100,7 +100,8 @@ bool ObjCSuperCallChecker::isCheckableClass(const ObjCImplementationDecl *D,
void ObjCSuperCallChecker::fillSelectors(ASTContext &Ctx,
ArrayRef<SelectorDescriptor> Sel,
StringRef ClassName) const {
- llvm::SmallSet<Selector, 16> &ClassSelectors = SelectorsForClass[ClassName];
+ llvm::SmallPtrSet<Selector, 16> &ClassSelectors =
+ SelectorsForClass[ClassName];
// Fill the Selectors SmallSet with all selectors we want to check.
for (ArrayRef<SelectorDescriptor>::iterator I = Sel.begin(), E = Sel.end();
I != E; ++I) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 0b00664c7c10..96f0d9bb3c3d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -248,8 +248,9 @@ public:
FieldInfo RetVal;
RetVal.Field = FD;
auto &Ctx = FD->getASTContext();
- std::tie(RetVal.Size, RetVal.Align) =
- Ctx.getTypeInfoInChars(FD->getType());
+ auto Info = Ctx.getTypeInfoInChars(FD->getType());
+ RetVal.Size = Info.Width;
+ RetVal.Align = Info.Align;
assert(llvm::isPowerOf2_64(RetVal.Align.getQuantity()));
if (auto Max = FD->getMaxAlignment())
RetVal.Align = std::max(Ctx.toCharUnitsFromBits(Max), RetVal.Align);
diff --git a/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 285d2da104f1..88e80c481a5a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -83,7 +83,7 @@ public:
private:
typedef void (PthreadLockChecker::*FnCheck)(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
CallDescriptionMap<FnCheck> PThreadCallbacks = {
// Init.
{{"pthread_mutex_init", 2}, &PthreadLockChecker::InitAnyLock},
@@ -167,46 +167,49 @@ private:
ProgramStateRef resolvePossiblyDestroyedMutex(ProgramStateRef state,
const MemRegion *lockR,
const SymbolRef *sym) const;
- void reportUseDestroyedBug(const CallEvent &Call, CheckerContext &C,
- unsigned ArgNo, CheckerKind checkKind) const;
+ void reportBug(CheckerContext &C, std::unique_ptr<BugType> BT[],
+ const Expr *MtxExpr, CheckerKind CheckKind,
+ StringRef Desc) const;
// Init.
void InitAnyLock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
- void InitLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
- SVal Lock, CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
+ void InitLockAux(const CallEvent &Call, CheckerContext &C,
+ const Expr *MtxExpr, SVal MtxVal,
+ CheckerKind CheckKind) const;
// Lock, Try-lock.
void AcquirePthreadLock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
void AcquireXNULock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
void TryPthreadLock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
void TryXNULock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
void TryFuchsiaLock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
void TryC11Lock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
- void AcquireLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
- SVal lock, bool isTryLock, LockingSemantics semantics,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
+ void AcquireLockAux(const CallEvent &Call, CheckerContext &C,
+ const Expr *MtxExpr, SVal MtxVal, bool IsTryLock,
+ LockingSemantics Semantics, CheckerKind CheckKind) const;
// Release.
void ReleaseAnyLock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
- void ReleaseLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
- SVal lock, CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
+ void ReleaseLockAux(const CallEvent &Call, CheckerContext &C,
+ const Expr *MtxExpr, SVal MtxVal,
+ CheckerKind CheckKind) const;
// Destroy.
void DestroyPthreadLock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
void DestroyXNULock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkkind) const;
- void DestroyLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
- SVal Lock, LockingSemantics semantics,
- CheckerKind checkkind) const;
+ CheckerKind CheckKind) const;
+ void DestroyLockAux(const CallEvent &Call, CheckerContext &C,
+ const Expr *MtxExpr, SVal MtxVal,
+ LockingSemantics Semantics, CheckerKind CheckKind) const;
public:
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
@@ -226,18 +229,18 @@ private:
mutable std::unique_ptr<BugType> BT_initlock[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_lor[CK_NumCheckKinds];
- void initBugType(CheckerKind checkKind) const {
- if (BT_doublelock[checkKind])
+ void initBugType(CheckerKind CheckKind) const {
+ if (BT_doublelock[CheckKind])
return;
- BT_doublelock[checkKind].reset(
- new BugType{CheckNames[checkKind], "Double locking", "Lock checker"});
- BT_doubleunlock[checkKind].reset(
- new BugType{CheckNames[checkKind], "Double unlocking", "Lock checker"});
- BT_destroylock[checkKind].reset(new BugType{
- CheckNames[checkKind], "Use destroyed lock", "Lock checker"});
- BT_initlock[checkKind].reset(new BugType{
- CheckNames[checkKind], "Init invalid lock", "Lock checker"});
- BT_lor[checkKind].reset(new BugType{CheckNames[checkKind],
+ BT_doublelock[CheckKind].reset(
+ new BugType{CheckNames[CheckKind], "Double locking", "Lock checker"});
+ BT_doubleunlock[CheckKind].reset(
+ new BugType{CheckNames[CheckKind], "Double unlocking", "Lock checker"});
+ BT_destroylock[CheckKind].reset(new BugType{
+ CheckNames[CheckKind], "Use destroyed lock", "Lock checker"});
+ BT_initlock[CheckKind].reset(new BugType{
+ CheckNames[CheckKind], "Init invalid lock", "Lock checker"});
+ BT_lor[CheckKind].reset(new BugType{CheckNames[CheckKind],
"Lock order reversal", "Lock checker"});
}
};
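
Throughout this file the per-call-site generateErrorNode/initBugType/emitReport boilerplate is folded into a single reportBug helper indexed by checker kind. A stand-alone sketch of that consolidation (stand-in types, not the analyzer API):

#include <cstdio>

enum CheckerKind { CK_PthreadLockChecker, CK_FuchsiaLockChecker, CK_NumCheckKinds };

struct BugType { const char *Name; };

// One helper picks the bug type from the kind and emits the message,
// replacing the duplicated reporting blocks at each call site.
static void reportBug(const BugType BT[], CheckerKind K, const char *Desc) {
  std::printf("[%s] %s\n", BT[K].Name, Desc);
}

int main() {
  const BugType DoubleLock[CK_NumCheckKinds] = {{"pthread"}, {"fuchsia"}};
  reportBug(DoubleLock, CK_PthreadLockChecker,
            "This lock has already been acquired");
}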
@@ -341,53 +344,53 @@ void PthreadLockChecker::printState(raw_ostream &Out, ProgramStateRef State,
void PthreadLockChecker::AcquirePthreadLock(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkKind) const {
- AcquireLockAux(Call, C, 0, Call.getArgSVal(0), false, PthreadSemantics,
- checkKind);
+ CheckerKind CheckKind) const {
+ AcquireLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), false,
+ PthreadSemantics, CheckKind);
}
void PthreadLockChecker::AcquireXNULock(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkKind) const {
- AcquireLockAux(Call, C, 0, Call.getArgSVal(0), false, XNUSemantics,
- checkKind);
+ CheckerKind CheckKind) const {
+ AcquireLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), false,
+ XNUSemantics, CheckKind);
}
void PthreadLockChecker::TryPthreadLock(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkKind) const {
- AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
- checkKind);
+ CheckerKind CheckKind) const {
+ AcquireLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), true,
+ PthreadSemantics, CheckKind);
}
void PthreadLockChecker::TryXNULock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkKind) const {
- AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
- checkKind);
+ CheckerKind CheckKind) const {
+ AcquireLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), true,
+ PthreadSemantics, CheckKind);
}
void PthreadLockChecker::TryFuchsiaLock(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkKind) const {
- AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
- checkKind);
+ CheckerKind CheckKind) const {
+ AcquireLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), true,
+ PthreadSemantics, CheckKind);
}
void PthreadLockChecker::TryC11Lock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkKind) const {
- AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
- checkKind);
+ CheckerKind CheckKind) const {
+ AcquireLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), true,
+ PthreadSemantics, CheckKind);
}
void PthreadLockChecker::AcquireLockAux(const CallEvent &Call,
- CheckerContext &C, unsigned ArgNo,
- SVal lock, bool isTryLock,
- enum LockingSemantics semantics,
- CheckerKind checkKind) const {
- if (!ChecksEnabled[checkKind])
+ CheckerContext &C, const Expr *MtxExpr,
+ SVal MtxVal, bool IsTryLock,
+ enum LockingSemantics Semantics,
+ CheckerKind CheckKind) const {
+ if (!ChecksEnabled[CheckKind])
return;
- const MemRegion *lockR = lock.getAsRegion();
+ const MemRegion *lockR = MtxVal.getAsRegion();
if (!lockR)
return;
@@ -398,28 +401,23 @@ void PthreadLockChecker::AcquireLockAux(const CallEvent &Call,
if (const LockState *LState = state->get<LockMap>(lockR)) {
if (LState->isLocked()) {
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
- initBugType(checkKind);
- auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_doublelock[checkKind], "This lock has already been acquired", N);
- report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
- C.emitReport(std::move(report));
+ reportBug(C, BT_doublelock, MtxExpr, CheckKind,
+ "This lock has already been acquired");
return;
} else if (LState->isDestroyed()) {
- reportUseDestroyedBug(Call, C, ArgNo, checkKind);
+ reportBug(C, BT_destroylock, MtxExpr, CheckKind,
+ "This lock has already been destroyed");
return;
}
}
ProgramStateRef lockSucc = state;
- if (isTryLock) {
+ if (IsTryLock) {
// Bifurcate the state, and allow a mode where the lock acquisition fails.
SVal RetVal = Call.getReturnValue();
if (auto DefinedRetVal = RetVal.getAs<DefinedSVal>()) {
ProgramStateRef lockFail;
- switch (semantics) {
+ switch (Semantics) {
case PthreadSemantics:
std::tie(lockFail, lockSucc) = state->assume(*DefinedRetVal);
break;
@@ -434,7 +432,7 @@ void PthreadLockChecker::AcquireLockAux(const CallEvent &Call,
}
// We might want to handle the case when the mutex lock function was inlined
// and returned an Unknown or Undefined value.
- } else if (semantics == PthreadSemantics) {
+ } else if (Semantics == PthreadSemantics) {
// Assume that the return value was 0.
SVal RetVal = Call.getReturnValue();
if (auto DefinedRetVal = RetVal.getAs<DefinedSVal>()) {
@@ -447,7 +445,7 @@ void PthreadLockChecker::AcquireLockAux(const CallEvent &Call,
// and returned an Unknown or Undefined value.
} else {
// XNU locking semantics return void on non-try locks
- assert((semantics == XNUSemantics) && "Unknown locking semantics");
+ assert((Semantics == XNUSemantics) && "Unknown locking semantics");
lockSucc = state;
}
@@ -459,18 +457,18 @@ void PthreadLockChecker::AcquireLockAux(const CallEvent &Call,
void PthreadLockChecker::ReleaseAnyLock(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkKind) const {
- ReleaseLockAux(Call, C, 0, Call.getArgSVal(0), checkKind);
+ CheckerKind CheckKind) const {
+ ReleaseLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), CheckKind);
}
void PthreadLockChecker::ReleaseLockAux(const CallEvent &Call,
- CheckerContext &C, unsigned ArgNo,
- SVal lock,
- CheckerKind checkKind) const {
- if (!ChecksEnabled[checkKind])
+ CheckerContext &C, const Expr *MtxExpr,
+ SVal MtxVal,
+ CheckerKind CheckKind) const {
+ if (!ChecksEnabled[CheckKind])
return;
- const MemRegion *lockR = lock.getAsRegion();
+ const MemRegion *lockR = MtxVal.getAsRegion();
if (!lockR)
return;
@@ -481,18 +479,12 @@ void PthreadLockChecker::ReleaseLockAux(const CallEvent &Call,
if (const LockState *LState = state->get<LockMap>(lockR)) {
if (LState->isUnlocked()) {
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
- initBugType(checkKind);
- auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_doubleunlock[checkKind], "This lock has already been unlocked",
- N);
- Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
- C.emitReport(std::move(Report));
+ reportBug(C, BT_doubleunlock, MtxExpr, CheckKind,
+ "This lock has already been unlocked");
return;
} else if (LState->isDestroyed()) {
- reportUseDestroyedBug(Call, C, ArgNo, checkKind);
+ reportBug(C, BT_destroylock, MtxExpr, CheckKind,
+ "This lock has already been destroyed");
return;
}
}
@@ -502,17 +494,9 @@ void PthreadLockChecker::ReleaseLockAux(const CallEvent &Call,
if (!LS.isEmpty()) {
const MemRegion *firstLockR = LS.getHead();
if (firstLockR != lockR) {
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
- initBugType(checkKind);
- auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_lor[checkKind],
- "This was not the most recently acquired lock. Possible "
- "lock order reversal",
- N);
- report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
- C.emitReport(std::move(report));
+ reportBug(C, BT_lor, MtxExpr, CheckKind,
+ "This was not the most recently acquired lock. Possible lock "
+ "order reversal");
return;
}
// Record that the lock was released.
@@ -525,25 +509,27 @@ void PthreadLockChecker::ReleaseLockAux(const CallEvent &Call,
void PthreadLockChecker::DestroyPthreadLock(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkKind) const {
- DestroyLockAux(Call, C, 0, Call.getArgSVal(0), PthreadSemantics, checkKind);
+ CheckerKind CheckKind) const {
+ DestroyLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0),
+ PthreadSemantics, CheckKind);
}
void PthreadLockChecker::DestroyXNULock(const CallEvent &Call,
CheckerContext &C,
- CheckerKind checkKind) const {
- DestroyLockAux(Call, C, 0, Call.getArgSVal(0), XNUSemantics, checkKind);
+ CheckerKind CheckKind) const {
+ DestroyLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), XNUSemantics,
+ CheckKind);
}
void PthreadLockChecker::DestroyLockAux(const CallEvent &Call,
- CheckerContext &C, unsigned ArgNo,
- SVal Lock,
- enum LockingSemantics semantics,
- CheckerKind checkKind) const {
- if (!ChecksEnabled[checkKind])
+ CheckerContext &C, const Expr *MtxExpr,
+ SVal MtxVal,
+ enum LockingSemantics Semantics,
+ CheckerKind CheckKind) const {
+ if (!ChecksEnabled[CheckKind])
return;
- const MemRegion *LockR = Lock.getAsRegion();
+ const MemRegion *LockR = MtxVal.getAsRegion();
if (!LockR)
return;
@@ -556,7 +542,7 @@ void PthreadLockChecker::DestroyLockAux(const CallEvent &Call,
const LockState *LState = State->get<LockMap>(LockR);
// Checking the return value of the destroy method only in the case of
// PthreadSemantics
- if (semantics == PthreadSemantics) {
+ if (Semantics == PthreadSemantics) {
if (!LState || LState->isUnlocked()) {
SymbolRef sym = Call.getReturnValue().getAsSymbol();
if (!sym) {
@@ -581,36 +567,26 @@ void PthreadLockChecker::DestroyLockAux(const CallEvent &Call,
return;
}
}
- StringRef Message;
- if (LState->isLocked()) {
- Message = "This lock is still locked";
- } else {
- Message = "This lock has already been destroyed";
- }
+ StringRef Message = LState->isLocked()
+ ? "This lock is still locked"
+ : "This lock has already been destroyed";
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
- initBugType(checkKind);
- auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_destroylock[checkKind], Message, N);
- Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
- C.emitReport(std::move(Report));
+ reportBug(C, BT_destroylock, MtxExpr, CheckKind, Message);
}
void PthreadLockChecker::InitAnyLock(const CallEvent &Call, CheckerContext &C,
- CheckerKind checkKind) const {
- InitLockAux(Call, C, 0, Call.getArgSVal(0), checkKind);
+ CheckerKind CheckKind) const {
+ InitLockAux(Call, C, Call.getArgExpr(0), Call.getArgSVal(0), CheckKind);
}
void PthreadLockChecker::InitLockAux(const CallEvent &Call, CheckerContext &C,
- unsigned ArgNo, SVal Lock,
- CheckerKind checkKind) const {
- if (!ChecksEnabled[checkKind])
+ const Expr *MtxExpr, SVal MtxVal,
+ CheckerKind CheckKind) const {
+ if (!ChecksEnabled[CheckKind])
return;
- const MemRegion *LockR = Lock.getAsRegion();
+ const MemRegion *LockR = MtxVal.getAsRegion();
if (!LockR)
return;
@@ -627,35 +603,24 @@ void PthreadLockChecker::InitLockAux(const CallEvent &Call, CheckerContext &C,
return;
}
- StringRef Message;
-
- if (LState->isLocked()) {
- Message = "This lock is still being held";
- } else {
- Message = "This lock has already been initialized";
- }
+ StringRef Message = LState->isLocked()
+ ? "This lock is still being held"
+ : "This lock has already been initialized";
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
- initBugType(checkKind);
- auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_initlock[checkKind], Message, N);
- Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
- C.emitReport(std::move(Report));
+ reportBug(C, BT_initlock, MtxExpr, CheckKind, Message);
}
-void PthreadLockChecker::reportUseDestroyedBug(const CallEvent &Call,
- CheckerContext &C,
- unsigned ArgNo,
- CheckerKind checkKind) const {
+void PthreadLockChecker::reportBug(CheckerContext &C,
+ std::unique_ptr<BugType> BT[],
+ const Expr *MtxExpr, CheckerKind CheckKind,
+ StringRef Desc) const {
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
- initBugType(checkKind);
- auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_destroylock[checkKind], "This lock has already been destroyed", N);
- Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
+ initBugType(CheckKind);
+ auto Report =
+ std::make_unique<PathSensitiveBugReport>(*BT[CheckKind], Desc, N);
+ Report->addRange(MtxExpr->getSourceRange());
C.emitReport(std::move(Report));
}
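
As a quick illustration (a minimal sketch, assuming the pthread lock checkers are enabled; the function and mutex names are hypothetical), the consolidated reportBug path still produces the same double-acquisition diagnostic:

    #include <pthread.h>

    pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

    void doubleAcquire(void) {
      pthread_mutex_lock(&mtx);
      pthread_mutex_lock(&mtx); // warning: This lock has already been acquired
    }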
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 1d8ed90f7590..1d903530201f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -177,7 +177,7 @@ static Optional<unsigned> findArgIdxOfSymbol(ProgramStateRef CurrSt,
for (unsigned Idx = 0; Idx < (*CE)->getNumArgs(); Idx++)
if (const MemRegion *MR = (*CE)->getArgSVal(Idx).getAsRegion())
if (const auto *TR = dyn_cast<TypedValueRegion>(MR))
- if (CurrSt->getSVal(MR, TR->getValueType()).getAsSymExpr() == Sym)
+ if (CurrSt->getSVal(MR, TR->getValueType()).getAsSymbol() == Sym)
return Idx;
return None;
@@ -439,7 +439,7 @@ annotateStartParameter(const ExplodedNode *N, SymbolRef Sym,
std::string s;
llvm::raw_string_ostream os(s);
- os << "Parameter '" << PVD->getNameAsString() << "' starts at +";
+ os << "Parameter '" << PVD->getDeclName() << "' starts at +";
if (CurrT->getCount() == 1) {
os << "1, as it is marked as consuming";
} else {
diff --git a/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index 599d4f306aa1..1a94ccdc2825 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -58,6 +58,11 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
+ // We assume that the location after the last element in the array is used
+ // as an end() iterator. Reporting on these would yield too many false
+ // positives.
+ if (Idx == ElementCount)
+ return;
+
ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
if (StOutBound && !StInBound) {
@@ -70,7 +75,7 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
// types explicitly reference such exploit categories (when applicable).
if (!BT)
BT.reset(new BuiltinBug(
- this, "Return of pointer value outside of expected range",
+ this, "Buffer overflow",
"Returned pointer value points outside the original object "
"(potential buffer overflow)"));
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h b/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
index ec43a23e30a9..92c386bbb2b0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
@@ -26,6 +26,8 @@ bool isStdSmartPtrCall(const CallEvent &Call);
/// Returns whether the smart pointer is null or not.
bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion);
+const BugType *getNullDereferenceBugType();
+
} // namespace smartptr
} // namespace ento
} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp
index 7bb25f397d01..8a85d454856b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp
@@ -23,23 +23,40 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "llvm/ADT/StringRef.h"
using namespace clang;
using namespace ento;
namespace {
-class SmartPtrChecker : public Checker<check::PreCall> {
- BugType NullDereferenceBugType{this, "Null SmartPtr dereference",
- "C++ Smart Pointer"};
+static const BugType *NullDereferenceBugTypePtr;
+
+class SmartPtrChecker : public Checker<check::PreCall> {
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ BugType NullDereferenceBugType{this, "Null SmartPtr dereference",
+ "C++ Smart Pointer"};
private:
- void reportBug(CheckerContext &C, const CallEvent &Call) const;
+ void reportBug(CheckerContext &C, const MemRegion *DerefRegion,
+ const CallEvent &Call) const;
+ void explainDereference(llvm::raw_ostream &OS, const MemRegion *DerefRegion,
+ const CallEvent &Call) const;
};
} // end of anonymous namespace
+// Define the inter-checker API.
+namespace clang {
+namespace ento {
+namespace smartptr {
+
+const BugType *getNullDereferenceBugType() { return NullDereferenceBugTypePtr; }
+
+} // namespace smartptr
+} // namespace ento
+} // namespace clang
+
void SmartPtrChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
if (!smartptr::isStdSmartPtrCall(Call))
@@ -55,23 +72,34 @@ void SmartPtrChecker::checkPreCall(const CallEvent &Call,
OverloadedOperatorKind OOK = OC->getOverloadedOperator();
if (OOK == OO_Star || OOK == OO_Arrow) {
if (smartptr::isNullSmartPtr(State, ThisRegion))
- reportBug(C, Call);
+ reportBug(C, ThisRegion, Call);
}
}
-void SmartPtrChecker::reportBug(CheckerContext &C,
+void SmartPtrChecker::reportBug(CheckerContext &C, const MemRegion *DerefRegion,
const CallEvent &Call) const {
ExplodedNode *ErrNode = C.generateErrorNode();
if (!ErrNode)
return;
-
- auto R = std::make_unique<PathSensitiveBugReport>(
- NullDereferenceBugType, "Dereference of null smart pointer", ErrNode);
+ llvm::SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
+ explainDereference(OS, DerefRegion, Call);
+ auto R = std::make_unique<PathSensitiveBugReport>(NullDereferenceBugType,
+ OS.str(), ErrNode);
+ R->markInteresting(DerefRegion);
C.emitReport(std::move(R));
}
+void SmartPtrChecker::explainDereference(llvm::raw_ostream &OS,
+ const MemRegion *DerefRegion,
+ const CallEvent &Call) const {
+ OS << "Dereference of null smart pointer ";
+ DerefRegion->printPretty(OS);
+}
+
void ento::registerSmartPtrChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<SmartPtrChecker>();
+ SmartPtrChecker *Checker = Mgr.registerChecker<SmartPtrChecker>();
+ NullDereferenceBugTypePtr = &Checker->NullDereferenceBugType;
}
bool ento::shouldRegisterSmartPtrChecker(const CheckerManager &mgr) {
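
A minimal sketch of what the checker now reports (hypothetical code, assuming the smart-pointer dereference modeling option is enabled); the region name is pretty-printed into the message:

    #include <memory>

    void deref() {
      std::unique_ptr<int> p; // default constructed, inner pointer is null
      *p; // warning: Dereference of null smart pointer 'p'
    }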
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index bcc7d4103c1c..6ee7bd9252b3 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -15,24 +15,31 @@
#include "SmartPtr.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include <string>
using namespace clang;
using namespace ento;
namespace {
-class SmartPtrModeling : public Checker<eval::Call, check::DeadSymbols> {
+class SmartPtrModeling
+ : public Checker<eval::Call, check::DeadSymbols, check::RegionChanges,
+ check::LiveSymbols> {
- bool isNullAfterMoveMethod(const CallEvent &Call) const;
+ bool isBoolConversionMethod(const CallEvent &Call) const;
public:
// Whether the checker should model for null dereferences of smart pointers.
@@ -40,20 +47,35 @@ public:
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State,
+ const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *LCtx, const CallEvent *Call) const;
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
private:
- ProgramStateRef updateTrackedRegion(const CallEvent &Call, CheckerContext &C,
- const MemRegion *ThisValRegion) const;
void handleReset(const CallEvent &Call, CheckerContext &C) const;
void handleRelease(const CallEvent &Call, CheckerContext &C) const;
void handleSwap(const CallEvent &Call, CheckerContext &C) const;
+ void handleGet(const CallEvent &Call, CheckerContext &C) const;
+ bool handleAssignOp(const CallEvent &Call, CheckerContext &C) const;
+ bool handleMoveCtr(const CallEvent &Call, CheckerContext &C,
+ const MemRegion *ThisRegion) const;
+ bool updateMovedSmartPointers(CheckerContext &C, const MemRegion *ThisRegion,
+ const MemRegion *OtherSmartPtrRegion) const;
+ void handleBoolConversion(const CallEvent &Call, CheckerContext &C) const;
using SmartPtrMethodHandlerFn =
void (SmartPtrModeling::*)(const CallEvent &Call, CheckerContext &) const;
CallDescriptionMap<SmartPtrMethodHandlerFn> SmartPtrMethodHandlers{
{{"reset"}, &SmartPtrModeling::handleReset},
{{"release"}, &SmartPtrModeling::handleRelease},
- {{"swap", 1}, &SmartPtrModeling::handleSwap}};
+ {{"swap", 1}, &SmartPtrModeling::handleSwap},
+ {{"get"}, &SmartPtrModeling::handleGet}};
};
} // end of anonymous namespace
@@ -81,13 +103,70 @@ bool isStdSmartPtrCall(const CallEvent &Call) {
bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion) {
const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisRegion);
- return InnerPointVal && InnerPointVal->isZeroConstant();
+ return InnerPointVal &&
+ !State->assume(InnerPointVal->castAs<DefinedOrUnknownSVal>(), true);
}
} // namespace smartptr
} // namespace ento
} // namespace clang
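
A sketch of what the relaxed definition covers (hypothetical factory function): a smart pointer now counts as null whenever its inner pointer cannot be assumed non-null, not only when it is a concrete zero:

    std::unique_ptr<int> p = factory(); // inner pointer is a symbol
    if (!p) {
      // On this path the symbol is constrained to null, so isNullSmartPtr()
      // returns true even though the value is not a concrete zero constant.
    }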
-bool SmartPtrModeling::isNullAfterMoveMethod(const CallEvent &Call) const {
+// If a region is removed, all of its subregions need to be removed too.
+static TrackedRegionMapTy
+removeTrackedSubregions(TrackedRegionMapTy RegionMap,
+ TrackedRegionMapTy::Factory &RegionMapFactory,
+ const MemRegion *Region) {
+ if (!Region)
+ return RegionMap;
+ for (const auto &E : RegionMap) {
+ if (E.first->isSubRegionOf(Region))
+ RegionMap = RegionMapFactory.remove(RegionMap, E.first);
+ }
+ return RegionMap;
+}
+
+static ProgramStateRef updateSwappedRegion(ProgramStateRef State,
+ const MemRegion *Region,
+ const SVal *RegionInnerPointerVal) {
+ if (RegionInnerPointerVal) {
+ State = State->set<TrackedRegionMap>(Region, *RegionInnerPointerVal);
+ } else {
+ State = State->remove<TrackedRegionMap>(Region);
+ }
+ return State;
+}
+
+// Helper method to get the inner pointer type of a specialized smart pointer.
+// Returns an empty type if no valid inner pointer type is found.
+static QualType getInnerPointerType(const CallEvent &Call, CheckerContext &C) {
+ const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(Call.getDecl());
+ if (!MethodDecl || !MethodDecl->getParent())
+ return {};
+
+ const auto *RecordDecl = MethodDecl->getParent();
+ if (!RecordDecl || !RecordDecl->isInStdNamespace())
+ return {};
+
+ const auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RecordDecl);
+ if (!TSD)
+ return {};
+
+ auto TemplateArgs = TSD->getTemplateArgs().asArray();
+ if (TemplateArgs.size() == 0)
+ return {};
+ auto InnerValueType = TemplateArgs[0].getAsType();
+ return C.getASTContext().getPointerType(InnerValueType.getCanonicalType());
+}
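
For example (a sketch; the variable is hypothetical), for a method call on std::unique_ptr<int> the first template argument is 'int', so the helper returns the canonical 'int *':

    std::unique_ptr<int> p;
    bool ok = static_cast<bool>(p); // inner pointer conjured with type 'int *'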
+
+// Helper method to pretty-print a region while avoiding extra spacing.
+static void checkAndPrettyPrintRegion(llvm::raw_ostream &OS,
+ const MemRegion *Region) {
+ if (Region->canPrintPretty()) {
+ OS << " ";
+ Region->printPretty(OS);
+ }
+}
+
+bool SmartPtrModeling::isBoolConversionMethod(const CallEvent &Call) const {
// TODO: Update CallDescription to support anonymous calls?
// TODO: Handle other methods, such as .get() or .release().
// But once we do, we'd need a visitor to explain null dereferences
@@ -98,43 +177,93 @@ bool SmartPtrModeling::isNullAfterMoveMethod(const CallEvent &Call) const {
bool SmartPtrModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
-
+ ProgramStateRef State = C.getState();
if (!smartptr::isStdSmartPtrCall(Call))
return false;
- if (isNullAfterMoveMethod(Call)) {
- ProgramStateRef State = C.getState();
+ if (isBoolConversionMethod(Call)) {
const MemRegion *ThisR =
cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
- if (!move::isMovedFrom(State, ThisR)) {
- // TODO: Model this case as well. At least, avoid invalidation of globals.
- return false;
+ if (ModelSmartPtrDereference) {
+ // The check for whether the region is moved is duplicated in the
+ // handleBoolConversion method.
+ // FIXME: Once we model std::move for smart pointers, clean this up and
+ // use that modeling.
+ handleBoolConversion(Call, C);
+ return true;
+ } else {
+ if (!move::isMovedFrom(State, ThisR)) {
+ // TODO: Model this case as well. At least, avoid invalidation of
+ // globals.
+ return false;
+ }
+
+ // TODO: Add a note to bug reports describing this decision.
+ C.addTransition(State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(),
+ C.getSValBuilder().makeZeroVal(Call.getResultType())));
+
+ return true;
}
-
- // TODO: Add a note to bug reports describing this decision.
- C.addTransition(
- State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
- C.getSValBuilder().makeZeroVal(Call.getResultType())));
- return true;
}
if (!ModelSmartPtrDereference)
return false;
if (const auto *CC = dyn_cast<CXXConstructorCall>(&Call)) {
- if (CC->getDecl()->isCopyOrMoveConstructor())
+ if (CC->getDecl()->isCopyConstructor())
return false;
- const MemRegion *ThisValRegion = CC->getCXXThisVal().getAsRegion();
- if (!ThisValRegion)
+ const MemRegion *ThisRegion = CC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
return false;
- auto State = updateTrackedRegion(Call, C, ThisValRegion);
- C.addTransition(State);
+ if (CC->getDecl()->isMoveConstructor())
+ return handleMoveCtr(Call, C, ThisRegion);
+
+ if (Call.getNumArgs() == 0) {
+ auto NullVal = C.getSValBuilder().makeNull();
+ State = State->set<TrackedRegionMap>(ThisRegion, NullVal);
+
+ C.addTransition(
+ State, C.getNoteTag([ThisRegion](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
+ !BR.isInteresting(ThisRegion))
+ return;
+ OS << "Default constructed smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ OS << " is null";
+ }));
+ } else {
+ const auto *TrackingExpr = Call.getArgExpr(0);
+ assert(TrackingExpr->getType()->isPointerType() &&
+ "Adding a non pointer value to TrackedRegionMap");
+ auto ArgVal = Call.getArgSVal(0);
+ State = State->set<TrackedRegionMap>(ThisRegion, ArgVal);
+
+ C.addTransition(State, C.getNoteTag([ThisRegion, TrackingExpr,
+ ArgVal](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
+ !BR.isInteresting(ThisRegion))
+ return;
+ bugreporter::trackExpressionValue(BR.getErrorNode(), TrackingExpr, BR);
+ OS << "Smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ if (ArgVal.isZeroConstant())
+ OS << " is constructed using a null value";
+ else
+ OS << " is constructed";
+ }));
+ }
return true;
}
+ if (handleAssignOp(Call, C))
+ return true;
+
const SmartPtrMethodHandlerFn *Handler = SmartPtrMethodHandlers.lookup(Call);
if (!Handler)
return false;
@@ -158,66 +287,351 @@ void SmartPtrModeling::checkDeadSymbols(SymbolReaper &SymReaper,
C.addTransition(State);
}
+void SmartPtrModeling::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ TrackedRegionMapTy RS = State->get<TrackedRegionMap>();
+
+ if (!RS.isEmpty()) {
+ Out << Sep << "Smart ptr regions :" << NL;
+ for (auto I : RS) {
+ I.first->dumpToStream(Out);
+ if (smartptr::isNullSmartPtr(State, I.first))
+ Out << ": Null";
+ else
+ Out << ": Non Null";
+ Out << NL;
+ }
+ }
+}
+
+ProgramStateRef SmartPtrModeling::checkRegionChanges(
+ ProgramStateRef State, const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
+ const CallEvent *Call) const {
+ TrackedRegionMapTy RegionMap = State->get<TrackedRegionMap>();
+ TrackedRegionMapTy::Factory &RegionMapFactory =
+ State->get_context<TrackedRegionMap>();
+ for (const auto *Region : Regions)
+ RegionMap = removeTrackedSubregions(RegionMap, RegionMapFactory,
+ Region->getBaseRegion());
+ return State->set<TrackedRegionMap>(RegionMap);
+}
+
+void SmartPtrModeling::checkLiveSymbols(ProgramStateRef State,
+ SymbolReaper &SR) const {
+ // Mark all tracked symbols as live.
+ TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
+ for (auto I = TrackedRegions.begin(), E = TrackedRegions.end(); I != E; ++I) {
+ SVal Val = I->second;
+ for (auto si = Val.symbol_begin(), se = Val.symbol_end(); si != se; ++si) {
+ SR.markLive(*si);
+ }
+ }
+}
+
void SmartPtrModeling::handleReset(const CallEvent &Call,
CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
if (!IC)
return;
- const MemRegion *ThisValRegion = IC->getCXXThisVal().getAsRegion();
- if (!ThisValRegion)
+ const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
return;
- auto State = updateTrackedRegion(Call, C, ThisValRegion);
- C.addTransition(State);
- // TODO: Make sure to ivalidate the the region in the Store if we don't have
+
+ assert(Call.getArgExpr(0)->getType()->isPointerType() &&
+ "Adding a non pointer value to TrackedRegionMap");
+ State = State->set<TrackedRegionMap>(ThisRegion, Call.getArgSVal(0));
+ const auto *TrackingExpr = Call.getArgExpr(0);
+ C.addTransition(
+ State, C.getNoteTag([ThisRegion, TrackingExpr](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
+ !BR.isInteresting(ThisRegion))
+ return;
+ bugreporter::trackExpressionValue(BR.getErrorNode(), TrackingExpr, BR);
+ OS << "Smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ OS << " reset using a null value";
+ }));
+ // TODO: Make sure to invalidate the region in the Store if we don't have
// time to model all methods.
}
void SmartPtrModeling::handleRelease(const CallEvent &Call,
CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
if (!IC)
return;
- const MemRegion *ThisValRegion = IC->getCXXThisVal().getAsRegion();
- if (!ThisValRegion)
+ const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
return;
- auto State = updateTrackedRegion(Call, C, ThisValRegion);
+ const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisRegion);
- const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisValRegion);
if (InnerPointVal) {
State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
*InnerPointVal);
}
- C.addTransition(State);
+
+ auto ValueToUpdate = C.getSValBuilder().makeNull();
+ State = State->set<TrackedRegionMap>(ThisRegion, ValueToUpdate);
+
+ C.addTransition(State, C.getNoteTag([ThisRegion](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
+ !BR.isInteresting(ThisRegion))
+ return;
+
+ OS << "Smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ OS << " is released and set to null";
+ }));
// TODO: Add support to enable MallocChecker to start tracking the raw
// pointer.
}
void SmartPtrModeling::handleSwap(const CallEvent &Call,
CheckerContext &C) const {
- // TODO: Add support to handle swap method.
+ // To model unique_ptr::swap() method.
+ const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
+ if (!IC)
+ return;
+
+ const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
+ return;
+
+ const auto *ArgRegion = Call.getArgSVal(0).getAsRegion();
+ if (!ArgRegion)
+ return;
+
+ auto State = C.getState();
+ const auto *ThisRegionInnerPointerVal =
+ State->get<TrackedRegionMap>(ThisRegion);
+ const auto *ArgRegionInnerPointerVal =
+ State->get<TrackedRegionMap>(ArgRegion);
+
+ // Swap the tracked region values.
+ State = updateSwappedRegion(State, ThisRegion, ArgRegionInnerPointerVal);
+ State = updateSwappedRegion(State, ArgRegion, ThisRegionInnerPointerVal);
+
+ C.addTransition(
+ State, C.getNoteTag([ThisRegion, ArgRegion](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
+ !BR.isInteresting(ThisRegion))
+ return;
+ BR.markInteresting(ArgRegion);
+ OS << "Swapped null smart pointer";
+ checkAndPrettyPrintRegion(OS, ArgRegion);
+ OS << " with smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ }));
}
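
A minimal sketch of the modeled swap (hypothetical code): the tracked inner-pointer values are exchanged, so dereferencing the now-null side is reported with the note emitted above:

    std::unique_ptr<int> a(new int(1));
    std::unique_ptr<int> b; // null
    a.swap(b);
    *a; // warning: Dereference of null smart pointer 'a'
        // note: Swapped null smart pointer 'b' with smart pointer 'a'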
-ProgramStateRef
-SmartPtrModeling::updateTrackedRegion(const CallEvent &Call, CheckerContext &C,
- const MemRegion *ThisValRegion) const {
- // TODO: Refactor and clean up handling too many things.
+void SmartPtrModeling::handleGet(const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
- auto NumArgs = Call.getNumArgs();
-
- if (NumArgs == 0) {
- auto NullSVal = C.getSValBuilder().makeNull();
- State = State->set<TrackedRegionMap>(ThisValRegion, NullSVal);
- } else if (NumArgs == 1) {
- auto ArgVal = Call.getArgSVal(0);
- assert(Call.getArgExpr(0)->getType()->isPointerType() &&
- "Adding a non pointer value to TrackedRegionMap");
- State = State->set<TrackedRegionMap>(ThisValRegion, ArgVal);
+ const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
+ if (!IC)
+ return;
+
+ const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
+ return;
+
+ SVal InnerPointerVal;
+ if (const auto *InnerValPtr = State->get<TrackedRegionMap>(ThisRegion)) {
+ InnerPointerVal = *InnerValPtr;
+ } else {
+ const auto *CallExpr = Call.getOriginExpr();
+ InnerPointerVal = C.getSValBuilder().conjureSymbolVal(
+ CallExpr, C.getLocationContext(), Call.getResultType(), C.blockCount());
+ State = State->set<TrackedRegionMap>(ThisRegion, InnerPointerVal);
}
- return State;
+ State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ InnerPointerVal);
+ // TODO: Add a NoteTag describing how the raw pointer was obtained via the
+ // 'get' method.
+ C.addTransition(State);
+}
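
A sketch of the get() modeling (hypothetical code): the call expression is bound to the tracked inner-pointer value, conjuring a fresh symbol if nothing was tracked yet:

    std::unique_ptr<int> p(new int(42));
    int *raw = p.get(); // 'raw' aliases the tracked inner pointer of 'p'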
+
+bool SmartPtrModeling::handleAssignOp(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const auto *OC = dyn_cast<CXXMemberOperatorCall>(&Call);
+ if (!OC)
+ return false;
+ OverloadedOperatorKind OOK = OC->getOverloadedOperator();
+ if (OOK != OO_Equal)
+ return false;
+ const MemRegion *ThisRegion = OC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
+ return false;
+
+ const MemRegion *OtherSmartPtrRegion = OC->getArgSVal(0).getAsRegion();
+ // In case 'nullptr' or '0' is assigned.
+ if (!OtherSmartPtrRegion) {
+ bool AssignedNull = Call.getArgSVal(0).isZeroConstant();
+ if (!AssignedNull)
+ return false;
+ auto NullVal = C.getSValBuilder().makeNull();
+ State = State->set<TrackedRegionMap>(ThisRegion, NullVal);
+ C.addTransition(State, C.getNoteTag([ThisRegion](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
+ !BR.isInteresting(ThisRegion))
+ return;
+ OS << "Smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ OS << " is assigned to null";
+ }));
+ return true;
+ }
+
+ return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion);
+}
+
+bool SmartPtrModeling::handleMoveCtr(const CallEvent &Call, CheckerContext &C,
+ const MemRegion *ThisRegion) const {
+ const auto *OtherSmartPtrRegion = Call.getArgSVal(0).getAsRegion();
+ if (!OtherSmartPtrRegion)
+ return false;
+
+ return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion);
+}
+
+bool SmartPtrModeling::updateMovedSmartPointers(
+ CheckerContext &C, const MemRegion *ThisRegion,
+ const MemRegion *OtherSmartPtrRegion) const {
+ ProgramStateRef State = C.getState();
+ const auto *OtherInnerPtr = State->get<TrackedRegionMap>(OtherSmartPtrRegion);
+ if (OtherInnerPtr) {
+ State = State->set<TrackedRegionMap>(ThisRegion, *OtherInnerPtr);
+ auto NullVal = C.getSValBuilder().makeNull();
+ State = State->set<TrackedRegionMap>(OtherSmartPtrRegion, NullVal);
+ bool IsArgValNull = OtherInnerPtr->isZeroConstant();
+
+ C.addTransition(
+ State,
+ C.getNoteTag([ThisRegion, OtherSmartPtrRegion, IsArgValNull](
+ PathSensitiveBugReport &BR, llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType())
+ return;
+ if (BR.isInteresting(OtherSmartPtrRegion)) {
+ OS << "Smart pointer";
+ checkAndPrettyPrintRegion(OS, OtherSmartPtrRegion);
+ OS << " is null after being moved to";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ }
+ if (BR.isInteresting(ThisRegion) && IsArgValNull) {
+ OS << "A null pointer value is moved to";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ BR.markInteresting(OtherSmartPtrRegion);
+ }
+ }));
+ return true;
+ } else {
+ // In case we don't know anything about the value we are moving from,
+ // remove the map entry for the smart pointer it was moved to.
+ auto NullVal = C.getSValBuilder().makeNull();
+ State = State->remove<TrackedRegionMap>(ThisRegion);
+ State = State->set<TrackedRegionMap>(OtherSmartPtrRegion, NullVal);
+ C.addTransition(State, C.getNoteTag([OtherSmartPtrRegion,
+ ThisRegion](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
+ !BR.isInteresting(OtherSmartPtrRegion))
+ return;
+ OS << "Smart pointer";
+ checkAndPrettyPrintRegion(OS, OtherSmartPtrRegion);
+ OS << " is null after; previous value moved to";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ }));
+ return true;
+ }
+ return false;
+}
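
A minimal sketch of the move modeling (hypothetical code): the moved-from pointer is tracked as null, and the note tags above explain the path:

    std::unique_ptr<int> a(new int(1));
    std::unique_ptr<int> b = std::move(a); // 'a' is now tracked as null
    *a; // warning: Dereference of null smart pointer 'a'
        // note: Smart pointer 'a' is null after being moved to 'b'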
+
+void SmartPtrModeling::handleBoolConversion(const CallEvent &Call,
+ CheckerContext &C) const {
+ // To model unique_ptr::operator bool
+ ProgramStateRef State = C.getState();
+ const Expr *CallExpr = Call.getOriginExpr();
+ const MemRegion *ThisRegion =
+ cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+
+ SVal InnerPointerVal;
+ if (const auto *InnerValPtr = State->get<TrackedRegionMap>(ThisRegion)) {
+ InnerPointerVal = *InnerValPtr;
+ } else {
+ // In case the inner pointer SVal is not available, conjure a symbolic
+ // value for the inner pointer.
+ auto InnerPointerType = getInnerPointerType(Call, C);
+ if (InnerPointerType.isNull())
+ return;
+
+ const LocationContext *LC = C.getLocationContext();
+ InnerPointerVal = C.getSValBuilder().conjureSymbolVal(
+ CallExpr, LC, InnerPointerType, C.blockCount());
+ State = State->set<TrackedRegionMap>(ThisRegion, InnerPointerVal);
+ }
+
+ if (State->isNull(InnerPointerVal).isConstrainedTrue()) {
+ State = State->BindExpr(CallExpr, C.getLocationContext(),
+ C.getSValBuilder().makeTruthVal(false));
+
+ C.addTransition(State);
+ return;
+ } else if (State->isNonNull(InnerPointerVal).isConstrainedTrue()) {
+ State = State->BindExpr(CallExpr, C.getLocationContext(),
+ C.getSValBuilder().makeTruthVal(true));
+
+ C.addTransition(State);
+ return;
+ } else if (move::isMovedFrom(State, ThisRegion)) {
+ C.addTransition(
+ State->BindExpr(CallExpr, C.getLocationContext(),
+ C.getSValBuilder().makeZeroVal(Call.getResultType())));
+ return;
+ } else {
+ ProgramStateRef NotNullState, NullState;
+ std::tie(NotNullState, NullState) =
+ State->assume(InnerPointerVal.castAs<DefinedOrUnknownSVal>());
+
+ auto NullVal = C.getSValBuilder().makeNull();
+ // Explicitly tracking the region as null.
+ NullState = NullState->set<TrackedRegionMap>(ThisRegion, NullVal);
+
+ NullState = NullState->BindExpr(CallExpr, C.getLocationContext(),
+ C.getSValBuilder().makeTruthVal(false));
+ C.addTransition(NullState, C.getNoteTag(
+ [ThisRegion](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ OS << "Assuming smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ OS << " is null";
+ },
+ /*IsPrunable=*/true));
+ NotNullState =
+ NotNullState->BindExpr(CallExpr, C.getLocationContext(),
+ C.getSValBuilder().makeTruthVal(true));
+ C.addTransition(
+ NotNullState,
+ C.getNoteTag(
+ [ThisRegion](PathSensitiveBugReport &BR, llvm::raw_ostream &OS) {
+ OS << "Assuming smart pointer";
+ checkAndPrettyPrintRegion(OS, ThisRegion);
+ OS << " is non-null";
+ },
+ /*IsPrunable=*/true));
+ return;
+ }
}
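
A sketch of the bifurcation on an unconstrained inner pointer (hypothetical factory function):

    std::unique_ptr<int> p = factory();
    if (p) {
      // note: Assuming smart pointer 'p' is non-null
    } else {
      // note: Assuming smart pointer 'p' is null; 'p' is now explicitly
      // tracked as null on this path.
    }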
void ento::registerSmartPtrModeling(CheckerManager &Mgr) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 8b575f4f4759..d1c366a94fac 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -40,12 +40,12 @@
//
// The following standard C functions are currently supported:
//
-// fgetc getline isdigit isupper
+// fgetc getline isdigit isupper toascii
// fread isalnum isgraph isxdigit
// fwrite isalpha islower read
// getc isascii isprint write
-// getchar isblank ispunct
-// getdelim iscntrl isspace
+// getchar isblank ispunct toupper
+// getdelim iscntrl isspace tolower
//
//===----------------------------------------------------------------------===//
@@ -126,6 +126,8 @@ class StdLibraryFunctionsChecker
}
ArgNo getArgNo() const { return ArgN; }
+ virtual StringRef getName() const = 0;
+
protected:
ArgNo ArgN; // Argument to which we apply the constraint.
@@ -138,18 +140,25 @@ class StdLibraryFunctionsChecker
/// Given a range, should the argument stay inside or outside this range?
enum RangeKind { OutOfRange, WithinRange };
- /// Encapsulates a single range on a single symbol within a branch.
+ /// Encapsulates a range on a single symbol.
class RangeConstraint : public ValueConstraint {
- RangeKind Kind; // Kind of range definition.
- IntRangeVector Args; // Polymorphic arguments.
+ RangeKind Kind;
+ // A range is formed as a set of intervals (sub-ranges).
+ // E.g. {['A', 'Z'], ['a', 'z']}
+ //
+ // The default-constructed RangeConstraint has an empty range set; applying
+ // such a constraint involves no assumptions, so the State remains
+ // unchanged. This is meaningful if the range depends on a looked-up
+ // type (e.g. [0, Socklen_tMax]): if the type is not found, the range
+ // is default-initialized to be empty.
+ IntRangeVector Ranges;
public:
- RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Args)
- : ValueConstraint(ArgN), Kind(Kind), Args(Args) {}
+ StringRef getName() const override { return "Range"; }
+ RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Ranges)
+ : ValueConstraint(ArgN), Kind(Kind), Ranges(Ranges) {}
- const IntRangeVector &getRanges() const {
- return Args;
- }
+ const IntRangeVector &getRanges() const { return Ranges; }
private:
ProgramStateRef applyAsOutOfRange(ProgramStateRef State,
@@ -158,6 +167,7 @@ class StdLibraryFunctionsChecker
ProgramStateRef applyAsWithinRange(ProgramStateRef State,
const CallEvent &Call,
const Summary &Summary) const;
+
public:
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
@@ -198,6 +208,7 @@ class StdLibraryFunctionsChecker
ArgNo OtherArgN;
public:
+ StringRef getName() const override { return "Comparison"; }
ComparisonConstraint(ArgNo ArgN, BinaryOperator::Opcode Opcode,
ArgNo OtherArgN)
: ValueConstraint(ArgN), Opcode(Opcode), OtherArgN(OtherArgN) {}
@@ -214,6 +225,7 @@ class StdLibraryFunctionsChecker
bool CannotBeNull = true;
public:
+ StringRef getName() const override { return "NonNull"; }
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
CheckerContext &C) const override {
@@ -242,15 +254,21 @@ class StdLibraryFunctionsChecker
}
};
- // Represents a buffer argument with an additional size argument.
- // E.g. the first two arguments here:
+ // Represents a buffer argument with an additional size constraint. The
+ // constraint may be a concrete value, or a symbolic value in an argument.
+ // Example 1. Concrete value as the minimum buffer size.
+ // char *asctime_r(const struct tm *restrict tm, char *restrict buf);
+ // // `buf` size must be at least 26 bytes according to the POSIX standard.
+ // Example 2. Argument as a buffer size.
// ctime_s(char *buffer, rsize_t bufsz, const time_t *time);
- // Another example:
+ // Example 3. The size is computed as a multiplication of other args.
// size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream);
// // Here, ptr is the buffer, and its minimum size is `size * nmemb`.
class BufferSizeConstraint : public ValueConstraint {
+ // The concrete value which is the minimum size for the buffer.
+ llvm::Optional<llvm::APSInt> ConcreteSize;
// The argument which holds the size of the buffer.
- ArgNo SizeArgN;
+ llvm::Optional<ArgNo> SizeArgN;
// The argument which is a multiplier to size. This is set in case of
// `fread` like functions where the size is computed as a multiplication of
// two arguments.
@@ -259,9 +277,11 @@ class StdLibraryFunctionsChecker
BinaryOperator::Opcode Op = BO_LE;
public:
+ StringRef getName() const override { return "BufferSize"; }
+ BufferSizeConstraint(ArgNo Buffer, llvm::APSInt BufMinSize)
+ : ValueConstraint(Buffer), ConcreteSize(BufMinSize) {}
BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize)
: ValueConstraint(Buffer), SizeArgN(BufSize) {}
-
BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize, ArgNo BufSizeMultiplier)
: ValueConstraint(Buffer), SizeArgN(BufSize),
SizeMultiplierArgN(BufSizeMultiplier) {}
@@ -272,14 +292,27 @@ class StdLibraryFunctionsChecker
SValBuilder &SvalBuilder = C.getSValBuilder();
// The buffer argument.
SVal BufV = getArgSVal(Call, getArgNo());
- // The size argument.
- SVal SizeV = getArgSVal(Call, SizeArgN);
- // Multiply with another argument if given.
- if (SizeMultiplierArgN) {
- SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
- SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
- Summary.getArgType(SizeArgN));
- }
+
+ // Get the size constraint.
+ const SVal SizeV = [this, &State, &Call, &Summary, &SvalBuilder]() {
+ if (ConcreteSize) {
+ return SVal(SvalBuilder.makeIntVal(*ConcreteSize));
+ } else if (SizeArgN) {
+ // The size argument.
+ SVal SizeV = getArgSVal(Call, *SizeArgN);
+ // Multiply with another argument if given.
+ if (SizeMultiplierArgN) {
+ SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
+ SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
+ Summary.getArgType(*SizeArgN));
+ }
+ return SizeV;
+ } else {
+ llvm_unreachable("The constraint must be either a concrete value or "
+ "encoded in an arguement.");
+ }
+ }();
+
// The dynamic size of the buffer argument, got from the analyzer engine.
SVal BufDynSize = getDynamicSizeWithOffset(State, BufV);
@@ -302,12 +335,20 @@ class StdLibraryFunctionsChecker
Tmp.Op = BinaryOperator::negateComparisonOp(Op);
return std::make_shared<BufferSizeConstraint>(Tmp);
}
+
+ bool checkSpecificValidity(const FunctionDecl *FD) const override {
+ const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
+ assert(ValidArg &&
+ "This constraint should be applied only on a pointer type");
+ return ValidArg;
+ }
};
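
A minimal sketch of a violation of the multiplicative form (hypothetical code): for fread, the required minimum is size * nmemb, which here exceeds the buffer's dynamic size:

    #include <stdio.h>

    void readTooMuch(FILE *fp) {
      char buf[4];
      fread(buf, 1, 10, fp); // required size 1 * 10 = 10 > sizeof(buf) == 4
    }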
/// The complete list of constraints that defines a single branch.
typedef std::vector<ValueConstraintPtr> ConstraintSet;
- using ArgTypes = std::vector<QualType>;
+ using ArgTypes = std::vector<Optional<QualType>>;
+ using RetType = Optional<QualType>;
// A placeholder type, we use it whenever we do not care about the concrete
// type in a Signature.
@@ -317,16 +358,37 @@ class StdLibraryFunctionsChecker
// The signature of a function we want to describe with a summary. This is a
// concessive signature, meaning there may be irrelevant types in the
// signature which we do not check against a function with concrete types.
- struct Signature {
- const ArgTypes ArgTys;
- const QualType RetTy;
- Signature(ArgTypes ArgTys, QualType RetTy) : ArgTys(ArgTys), RetTy(RetTy) {
- assertRetTypeSuitableForSignature(RetTy);
- for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
- QualType ArgTy = ArgTys[I];
- assertArgTypeSuitableForSignature(ArgTy);
+ // All types in the spec need to be canonical.
+ class Signature {
+ using ArgQualTypes = std::vector<QualType>;
+ ArgQualTypes ArgTys;
+ QualType RetTy;
+ // True if any component type is not found by lookup.
+ bool Invalid = false;
+
+ public:
+ // Construct a signature from optional types. If any of the optional types
+ // are not set then the signature will be invalid.
+ Signature(ArgTypes ArgTys, RetType RetTy) {
+ for (Optional<QualType> Arg : ArgTys) {
+ if (!Arg) {
+ Invalid = true;
+ return;
+ } else {
+ assertArgTypeSuitableForSignature(*Arg);
+ this->ArgTys.push_back(*Arg);
+ }
+ }
+ if (!RetTy) {
+ Invalid = true;
+ return;
+ } else {
+ assertRetTypeSuitableForSignature(*RetTy);
+ this->RetTy = *RetTy;
}
}
+
+ bool isInvalid() const { return Invalid; }
bool matches(const FunctionDecl *FD) const;
private:
@@ -380,7 +442,6 @@ class StdLibraryFunctionsChecker
/// rules for the given parameter's type, those rules are checked once the
/// signature is matched.
class Summary {
- const Signature Sign;
const InvalidationKind InvalidationKd;
Cases CaseConstraints;
ConstraintSet ArgConstraints;
@@ -390,14 +451,19 @@ class StdLibraryFunctionsChecker
const FunctionDecl *FD = nullptr;
public:
- Summary(ArgTypes ArgTys, QualType RetTy, InvalidationKind InvalidationKd)
- : Sign(ArgTys, RetTy), InvalidationKd(InvalidationKd) {}
+ Summary(InvalidationKind InvalidationKd) : InvalidationKd(InvalidationKd) {}
- Summary &Case(ConstraintSet&& CS) {
+ Summary &Case(ConstraintSet &&CS) {
CaseConstraints.push_back(std::move(CS));
return *this;
}
+ Summary &Case(const ConstraintSet &CS) {
+ CaseConstraints.push_back(CS);
+ return *this;
+ }
Summary &ArgConstraint(ValueConstraintPtr VC) {
+ assert(VC->getArgNo() != Ret &&
+ "Arg constraint should not refer to the return value");
ArgConstraints.push_back(VC);
return *this;
}
@@ -412,7 +478,7 @@ class StdLibraryFunctionsChecker
// Returns true if the summary should be applied to the given function.
// And if yes then store the function declaration.
- bool matchesAndSet(const FunctionDecl *FD) {
+ bool matchesAndSet(const Signature &Sign, const FunctionDecl *FD) {
bool Result = Sign.matches(FD) && validateByConstraints(FD);
if (Result) {
assert(!this->FD && "FD must not be set more than once");
@@ -472,17 +538,24 @@ private:
void initFunctionSummaries(CheckerContext &C) const;
void reportBug(const CallEvent &Call, ExplodedNode *N,
- CheckerContext &C) const {
+ const ValueConstraint *VC, CheckerContext &C) const {
if (!ChecksEnabled[CK_StdCLibraryFunctionArgsChecker])
return;
- // TODO Add detailed diagnostic.
- StringRef Msg = "Function argument constraint is not satisfied";
+ // TODO: Add a more detailed diagnostic.
+ std::string Msg =
+ (Twine("Function argument constraint is not satisfied, constraint: ") +
+ VC->getName().data() + ", ArgN: " + Twine(VC->getArgNo()))
+ .str();
if (!BT_InvalidArg)
BT_InvalidArg = std::make_unique<BugType>(
CheckNames[CK_StdCLibraryFunctionArgsChecker],
"Unsatisfied argument constraints", categories::LogicError);
auto R = std::make_unique<PathSensitiveBugReport>(*BT_InvalidArg, Msg, N);
- bugreporter::trackExpressionValue(N, Call.getArgExpr(0), *R);
+ bugreporter::trackExpressionValue(N, Call.getArgExpr(VC->getArgNo()), *R);
+
+ // Highlight the range of the argument that was violated.
+ R->addRange(Call.getArgSourceRange(VC->getArgNo()));
+
C.emitReport(std::move(R));
}
};
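
With the richer message, a report for an out-of-range argument (hypothetical example) might read as follows; the violated argument's source range is also highlighted:

    #include <ctype.h>

    int classify(int c) {
      return isalpha(c);
      // possible report: "Function argument constraint is not satisfied,
      // constraint: Range, ArgN: 0"
    }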
@@ -495,6 +568,8 @@ const StdLibraryFunctionsChecker::ArgNo StdLibraryFunctionsChecker::Ret =
ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsOutOfRange(
ProgramStateRef State, const CallEvent &Call,
const Summary &Summary) const {
+ if (Ranges.empty())
+ return State;
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
@@ -522,6 +597,8 @@ ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsOutOfRange(
ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsWithinRange(
ProgramStateRef State, const CallEvent &Call,
const Summary &Summary) const {
+ if (Ranges.empty())
+ return State;
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
@@ -615,7 +692,7 @@ void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
// The argument constraint is not satisfied.
if (FailureSt && !SuccessSt) {
if (ExplodedNode *N = C.generateErrorNode(NewState))
- reportBug(Call, N, C);
+ reportBug(Call, N, Constraint.get(), C);
break;
} else {
// We will apply the constraint even if we cannot reason about the
@@ -665,7 +742,7 @@ bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
case EvalCallAsPure: {
ProgramStateRef State = C.getState();
const LocationContext *LC = C.getLocationContext();
- const auto *CE = cast_or_null<CallExpr>(Call.getOriginExpr());
+ const auto *CE = cast<CallExpr>(Call.getOriginExpr());
SVal V = C.getSValBuilder().conjureSymbolVal(
CE, LC, CE->getType().getCanonicalType(), C.blockCount());
State = State->BindExpr(CE, LC, V);
@@ -682,21 +759,39 @@ bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
bool StdLibraryFunctionsChecker::Signature::matches(
const FunctionDecl *FD) const {
- // Check number of arguments:
+ assert(!isInvalid());
+ // Check the number of arguments.
if (FD->param_size() != ArgTys.size())
return false;
- // Check return type.
- if (!isIrrelevant(RetTy))
- if (RetTy != FD->getReturnType().getCanonicalType())
+ // The "restrict" keyword is illegal in C++, however, many libc
+ // implementations use the "__restrict" compiler intrinsic in functions
+ // prototypes. The "__restrict" keyword qualifies a type as a restricted type
+ // even in C++.
+ // In case of any non-C99 languages, we don't want to match based on the
+ // restrict qualifier because we cannot know if the given libc implementation
+ // qualifies the paramter type or not.
+ auto RemoveRestrict = [&FD](QualType T) {
+ if (!FD->getASTContext().getLangOpts().C99)
+ T.removeLocalRestrict();
+ return T;
+ };
+
+ // Check the return type.
+ if (!isIrrelevant(RetTy)) {
+ QualType FDRetTy = RemoveRestrict(FD->getReturnType().getCanonicalType());
+ if (RetTy != FDRetTy)
return false;
+ }
- // Check argument types.
+ // Check the argument types.
for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
QualType ArgTy = ArgTys[I];
if (isIrrelevant(ArgTy))
continue;
- if (ArgTy != FD->getParamDecl(I)->getType().getCanonicalType())
+ QualType FDArgTy =
+ RemoveRestrict(FD->getParamDecl(I)->getType().getCanonicalType());
+ if (ArgTy != FDArgTy)
return false;
}
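
To illustrate (a hypothetical prototype), in C++ mode a libc header may declare:

    char *strcpy(char *__restrict dst, const char *__restrict src);

A summary written with plain 'char *' and 'const char *' parameter types still matches, because the __restrict qualifier is stripped from both sides before the comparison.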
@@ -726,32 +821,6 @@ StdLibraryFunctionsChecker::findFunctionSummary(const CallEvent &Call,
return findFunctionSummary(FD, C);
}
-static llvm::Optional<QualType> lookupType(StringRef Name,
- const ASTContext &ACtx) {
- IdentifierInfo &II = ACtx.Idents.get(Name);
- auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
- if (LookupRes.size() == 0)
- return None;
-
- // Prioritze typedef declarations.
- // This is needed in case of C struct typedefs. E.g.:
- // typedef struct FILE FILE;
- // In this case, we have a RecordDecl 'struct FILE' with the name 'FILE' and
- // we have a TypedefDecl with the name 'FILE'.
- for (Decl *D : LookupRes)
- if (auto *TD = dyn_cast<TypedefNameDecl>(D))
- return ACtx.getTypeDeclType(TD).getCanonicalType();
-
- // Find the first TypeDecl.
- // There maybe cases when a function has the same name as a struct.
- // E.g. in POSIX: `struct stat` and the function `stat()`:
- // int stat(const char *restrict path, struct stat *restrict buf);
- for (Decl *D : LookupRes)
- if (auto *TD = dyn_cast<TypeDecl>(D))
- return ACtx.getTypeDeclType(TD).getCanonicalType();
- return None;
-}
-
void StdLibraryFunctionsChecker::initFunctionSummaries(
CheckerContext &C) const {
if (!FunctionSummaryMap.empty())
@@ -761,6 +830,91 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
BasicValueFactory &BVF = SVB.getBasicValueFactory();
const ASTContext &ACtx = BVF.getContext();
+ // Helper class to lookup a type by its name.
+ class LookupType {
+ const ASTContext &ACtx;
+
+ public:
+ LookupType(const ASTContext &ACtx) : ACtx(ACtx) {}
+
+ // Find the type. If not found then the optional is not set.
+ llvm::Optional<QualType> operator()(StringRef Name) {
+ IdentifierInfo &II = ACtx.Idents.get(Name);
+ auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
+ if (LookupRes.size() == 0)
+ return None;
+
+ // Prioritize typedef declarations.
+ // This is needed in case of C struct typedefs. E.g.:
+ // typedef struct FILE FILE;
+ // In this case, we have a RecordDecl 'struct FILE' with the name 'FILE'
+ // and we have a TypedefDecl with the name 'FILE'.
+ for (Decl *D : LookupRes)
+ if (auto *TD = dyn_cast<TypedefNameDecl>(D))
+ return ACtx.getTypeDeclType(TD).getCanonicalType();
+
+ // Find the first TypeDecl.
+ // There may be cases when a function has the same name as a struct.
+ // E.g. in POSIX: `struct stat` and the function `stat()`:
+ // int stat(const char *restrict path, struct stat *restrict buf);
+ for (Decl *D : LookupRes)
+ if (auto *TD = dyn_cast<TypeDecl>(D))
+ return ACtx.getTypeDeclType(TD).getCanonicalType();
+ return None;
+ }
+ } lookupTy(ACtx);
+
+ // Below are auxiliary classes to handle optional types that we get as a
+ // result of the lookup.
+ class GetRestrictTy {
+ const ASTContext &ACtx;
+
+ public:
+ GetRestrictTy(const ASTContext &ACtx) : ACtx(ACtx) {}
+ QualType operator()(QualType Ty) {
+ return ACtx.getLangOpts().C99 ? ACtx.getRestrictType(Ty) : Ty;
+ }
+ Optional<QualType> operator()(Optional<QualType> Ty) {
+ if (Ty)
+ return operator()(*Ty);
+ return None;
+ }
+ } getRestrictTy(ACtx);
+ class GetPointerTy {
+ const ASTContext &ACtx;
+
+ public:
+ GetPointerTy(const ASTContext &ACtx) : ACtx(ACtx) {}
+ QualType operator()(QualType Ty) { return ACtx.getPointerType(Ty); }
+ Optional<QualType> operator()(Optional<QualType> Ty) {
+ if (Ty)
+ return operator()(*Ty);
+ return None;
+ }
+ } getPointerTy(ACtx);
+ class {
+ public:
+ Optional<QualType> operator()(Optional<QualType> Ty) {
+ return Ty ? Optional<QualType>(Ty->withConst()) : None;
+ }
+ QualType operator()(QualType Ty) { return Ty.withConst(); }
+ } getConstTy;
+ class GetMaxValue {
+ BasicValueFactory &BVF;
+
+ public:
+ GetMaxValue(BasicValueFactory &BVF) : BVF(BVF) {}
+ Optional<RangeInt> operator()(QualType Ty) {
+ return BVF.getMaxValue(Ty).getLimitedValue();
+ }
+ Optional<RangeInt> operator()(Optional<QualType> Ty) {
+ if (Ty) {
+ return operator()(*Ty);
+ }
+ return None;
+ }
+ } getMaxValue(BVF);
+
// These types are useful for writing specifications quickly.
// New specifications should probably introduce more types.
// Some types are hard to obtain from the AST, e.g. "ssize_t".
@@ -769,44 +923,36 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// or long long, so three summary variants would be enough).
// Of course, function variants are also useful for C++ overloads.
const QualType VoidTy = ACtx.VoidTy;
+ const QualType CharTy = ACtx.CharTy;
+ const QualType WCharTy = ACtx.WCharTy;
const QualType IntTy = ACtx.IntTy;
const QualType UnsignedIntTy = ACtx.UnsignedIntTy;
const QualType LongTy = ACtx.LongTy;
- const QualType LongLongTy = ACtx.LongLongTy;
const QualType SizeTy = ACtx.getSizeType();
- const QualType VoidPtrTy = ACtx.VoidPtrTy; // void *
- const QualType IntPtrTy = ACtx.getPointerType(IntTy); // int *
+ const QualType VoidPtrTy = getPointerTy(VoidTy); // void *
+ const QualType IntPtrTy = getPointerTy(IntTy); // int *
const QualType UnsignedIntPtrTy =
- ACtx.getPointerType(UnsignedIntTy); // unsigned int *
- const QualType VoidPtrRestrictTy =
- ACtx.getLangOpts().C99 ? ACtx.getRestrictType(VoidPtrTy) // void *restrict
- : VoidPtrTy;
+ getPointerTy(UnsignedIntTy); // unsigned int *
+ const QualType VoidPtrRestrictTy = getRestrictTy(VoidPtrTy);
const QualType ConstVoidPtrTy =
- ACtx.getPointerType(ACtx.VoidTy.withConst()); // const void *
- const QualType CharPtrTy = ACtx.getPointerType(ACtx.CharTy); // char *
- const QualType CharPtrRestrictTy =
- ACtx.getLangOpts().C99 ? ACtx.getRestrictType(CharPtrTy) // char *restrict
- : CharPtrTy;
+ getPointerTy(getConstTy(VoidTy)); // const void *
+ const QualType CharPtrTy = getPointerTy(CharTy); // char *
+ const QualType CharPtrRestrictTy = getRestrictTy(CharPtrTy);
const QualType ConstCharPtrTy =
- ACtx.getPointerType(ACtx.CharTy.withConst()); // const char *
- const QualType ConstCharPtrRestrictTy =
- ACtx.getLangOpts().C99
- ? ACtx.getRestrictType(ConstCharPtrTy) // const char *restrict
- : ConstCharPtrTy;
- const QualType Wchar_tPtrTy = ACtx.getPointerType(ACtx.WCharTy); // wchar_t *
+ getPointerTy(getConstTy(CharTy)); // const char *
+ const QualType ConstCharPtrRestrictTy = getRestrictTy(ConstCharPtrTy);
+ const QualType Wchar_tPtrTy = getPointerTy(WCharTy); // wchar_t *
const QualType ConstWchar_tPtrTy =
- ACtx.getPointerType(ACtx.WCharTy.withConst()); // const wchar_t *
- const QualType ConstVoidPtrRestrictTy =
- ACtx.getLangOpts().C99
- ? ACtx.getRestrictType(ConstVoidPtrTy) // const void *restrict
- : ConstVoidPtrTy;
+ getPointerTy(getConstTy(WCharTy)); // const wchar_t *
+ const QualType ConstVoidPtrRestrictTy = getRestrictTy(ConstVoidPtrTy);
+ const QualType SizePtrTy = getPointerTy(SizeTy);
+ const QualType SizePtrRestrictTy = getRestrictTy(SizePtrTy);
const RangeInt IntMax = BVF.getMaxValue(IntTy).getLimitedValue();
const RangeInt UnsignedIntMax =
BVF.getMaxValue(UnsignedIntTy).getLimitedValue();
const RangeInt LongMax = BVF.getMaxValue(LongTy).getLimitedValue();
- const RangeInt LongLongMax = BVF.getMaxValue(LongLongTy).getLimitedValue();
const RangeInt SizeMax = BVF.getMaxValue(SizeTy).getLimitedValue();
// Set UCharRangeMax to min of int or uchar maximum value.
@@ -840,15 +986,19 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// Add a summary to a FunctionDecl found by lookup. The lookup is performed
// by the given Name in the global scope. The summary will be attached to the
// found FunctionDecl only if the signatures match.
- void operator()(StringRef Name, Summary S) {
+ //
+ // Returns true if the summary has been added, false otherwise.
+ bool operator()(StringRef Name, Signature Sign, Summary Sum) {
+ if (Sign.isInvalid())
+ return false;
IdentifierInfo &II = ACtx.Idents.get(Name);
auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
if (LookupRes.size() == 0)
- return;
+ return false;
for (Decl *D : LookupRes) {
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (S.matchesAndSet(FD)) {
- auto Res = Map.insert({FD->getCanonicalDecl(), S});
+ if (Sum.matchesAndSet(Sign, FD)) {
+ auto Res = Map.insert({FD->getCanonicalDecl(), Sum});
assert(Res.second && "Function already has a summary set!");
(void)Res;
if (DisplayLoadedSummaries) {
@@ -856,44 +1006,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
FD->print(llvm::errs());
llvm::errs() << "\n";
}
- return;
+ return true;
}
}
}
+ return false;
}
- // Add several summaries for the given name.
- void operator()(StringRef Name, const std::vector<Summary> &Summaries) {
- for (const Summary &S : Summaries)
- operator()(Name, S);
+ // Add the same summary for different names with the Signature explicitly
+ // given.
+ void operator()(std::vector<StringRef> Names, Signature Sign, Summary Sum) {
+ for (StringRef Name : Names)
+ operator()(Name, Sign, Sum);
}
} addToFunctionSummaryMap(ACtx, FunctionSummaryMap, DisplayLoadedSummaries);
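+  // Both call forms are used below, e.g. (sketch):
+  //   addToFunctionSummaryMap("isalnum", Signature(...), Summary(...));
+  //   addToFunctionSummaryMap({"getc", "fgetc"}, Signature(...), Summary(...));
+  // The bool result of the single-name form lets callers fall back to a more
+  // permissive Signature when the exact one does not match (see accept()
+  // below).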
- // We are finally ready to define specifications for all supported functions.
- //
- // The signature needs to have the correct number of arguments.
- // However, we insert `Irrelevant' when the type is insignificant.
- //
- // Argument ranges should always cover all variants. If return value
- // is completely unknown, omit it from the respective range set.
- //
- // All types in the spec need to be canonical.
- //
- // Every item in the list of range sets represents a particular
- // execution path the analyzer would need to explore once
- // the call is modeled - a new program state is constructed
- // for every range set, and each range line in the range set
- // corresponds to a specific constraint within this state.
- //
- // Upon comparing to another argument, the other argument is casted
- // to the current argument's type. This avoids proper promotion but
- // seems useful. For example, read() receives size_t argument,
- // and its return value, which is of type ssize_t, cannot be greater
- // than this argument. If we made a promotion, and the size argument
- // is equal to, say, 10, then we'd impose a range of [0, 10] on the
- // return value, however the correct range is [-1, 10].
- //
- // Please update the list of functions in the header after editing!
-
// Below are helper functions to create the summaries.
auto ArgumentCondition = [](ArgNo ArgN, RangeKind Kind,
IntRangeVector Ranges) {
@@ -910,9 +1036,22 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
return std::make_shared<ComparisonConstraint>(Ret, Op, OtherArgN);
}
} ReturnValueCondition;
- auto Range = [](RangeInt b, RangeInt e) {
- return IntRangeVector{std::pair<RangeInt, RangeInt>{b, e}};
- };
+ struct {
+ auto operator()(RangeInt b, RangeInt e) {
+ return IntRangeVector{std::pair<RangeInt, RangeInt>{b, e}};
+ }
+ auto operator()(RangeInt b, Optional<RangeInt> e) {
+ if (e)
+ return IntRangeVector{std::pair<RangeInt, RangeInt>{b, *e}};
+ return IntRangeVector{};
+ }
+ auto operator()(std::pair<RangeInt, RangeInt> i0,
+ std::pair<RangeInt, Optional<RangeInt>> i1) {
+ if (i1.second)
+ return IntRangeVector{i0, {i1.first, *(i1.second)}};
+ return IntRangeVector{i0};
+ }
+ } Range;
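+  // Note (illustrative): the Optional-taking overloads let a range collapse
+  // gracefully when a type lookup failed. E.g. Range(-1, Ssize_tMax) yields
+  // {{-1, max}} when "ssize_t" was found and an empty IntRangeVector
+  // otherwise; Range({-1, -1}, {1, Ssize_tMax}) similarly drops its second
+  // interval.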
auto SingleValue = [](RangeInt v) {
return IntRangeVector{std::pair<RangeInt, RangeInt>{v, v}};
};
@@ -921,60 +1060,28 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
return std::make_shared<NotNullConstraint>(ArgN);
};
- Optional<QualType> FileTy = lookupType("FILE", ACtx);
- Optional<QualType> FilePtrTy, FilePtrRestrictTy;
- if (FileTy) {
- // FILE *
- FilePtrTy = ACtx.getPointerType(*FileTy);
- // FILE *restrict
- FilePtrRestrictTy =
- ACtx.getLangOpts().C99 ? ACtx.getRestrictType(*FilePtrTy) : *FilePtrTy;
- }
+ Optional<QualType> FileTy = lookupTy("FILE");
+ Optional<QualType> FilePtrTy = getPointerTy(FileTy);
+ Optional<QualType> FilePtrRestrictTy = getRestrictTy(FilePtrTy);
- using RetType = QualType;
- // Templates for summaries that are reused by many functions.
- auto Getc = [&]() {
- return Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
- .Case({ReturnValueCondition(WithinRange,
- {{EOFv, EOFv}, {0, UCharRangeMax}})});
- };
- auto Read = [&](RetType R, RangeInt Max) {
- return Summary(ArgTypes{Irrelevant, Irrelevant, SizeTy}, RetType{R},
- NoEvalCall)
- .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Max))});
- };
- auto Fread = [&]() {
- return Summary(
- ArgTypes{VoidPtrRestrictTy, SizeTy, SizeTy, *FilePtrRestrictTy},
- RetType{SizeTy}, NoEvalCall)
- .Case({
- ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- })
- .ArgConstraint(NotNull(ArgNo(0)));
- };
- auto Fwrite = [&]() {
- return Summary(ArgTypes{ConstVoidPtrRestrictTy, SizeTy, SizeTy,
- *FilePtrRestrictTy},
- RetType{SizeTy}, NoEvalCall)
- .Case({
- ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- })
- .ArgConstraint(NotNull(ArgNo(0)));
- };
- auto Getline = [&](RetType R, RangeInt Max) {
- return Summary(ArgTypes{Irrelevant, Irrelevant, Irrelevant}, RetType{R},
- NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, {{-1, -1}, {1, Max}})});
- };
+  // We are finally ready to define specifications for all supported functions.
+  //
+  // Argument ranges should always cover all variants. If the return value
+  // is completely unknown, omit it from the respective range set.
+  //
+  // Every item in the list of range sets represents a particular
+  // execution path the analyzer would need to explore once
+  // the call is modeled - a new program state is constructed
+  // for every range set, and each range line in the range set
+  // corresponds to a specific constraint within this state.
// The isascii() family of functions.
// The behavior is undefined if the value of the argument is neither
// representable as unsigned char nor equal to EOF. See e.g. C99
// 7.4.1.2 The isalpha function (p: 181-182).
addToFunctionSummaryMap(
- "isalnum",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isalnum", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
// Boils down to isupper() or islower() or isdigit().
.Case({ArgumentCondition(0U, WithinRange,
{{'0', '9'}, {'A', 'Z'}, {'a', 'z'}}),
@@ -991,8 +1098,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(ArgumentCondition(
0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
addToFunctionSummaryMap(
- "isalpha",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isalpha", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{'A', 'Z'}, {'a', 'z'}}),
ReturnValueCondition(OutOfRange, SingleValue(0))})
// The locale-specific range.
@@ -1002,43 +1109,43 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
{{'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isascii",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isascii", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
ReturnValueCondition(OutOfRange, SingleValue(0))})
.Case({ArgumentCondition(0U, OutOfRange, Range(0, 127)),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isblank",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isblank", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{'\t', '\t'}, {' ', ' '}}),
ReturnValueCondition(OutOfRange, SingleValue(0))})
.Case({ArgumentCondition(0U, OutOfRange, {{'\t', '\t'}, {' ', ' '}}),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "iscntrl",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "iscntrl", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{0, 32}, {127, 127}}),
ReturnValueCondition(OutOfRange, SingleValue(0))})
.Case({ArgumentCondition(0U, OutOfRange, {{0, 32}, {127, 127}}),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isdigit",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isdigit", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range('0', '9')),
ReturnValueCondition(OutOfRange, SingleValue(0))})
.Case({ArgumentCondition(0U, OutOfRange, Range('0', '9')),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isgraph",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isgraph", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(33, 126)),
ReturnValueCondition(OutOfRange, SingleValue(0))})
.Case({ArgumentCondition(0U, OutOfRange, Range(33, 126)),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "islower",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "islower", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
// Is certainly lowercase.
.Case({ArgumentCondition(0U, WithinRange, Range('a', 'z')),
ReturnValueCondition(OutOfRange, SingleValue(0))})
@@ -1052,15 +1159,15 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.Case({ArgumentCondition(0U, OutOfRange, Range(0, UCharRangeMax)),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isprint",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isprint", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(32, 126)),
ReturnValueCondition(OutOfRange, SingleValue(0))})
.Case({ArgumentCondition(0U, OutOfRange, Range(32, 126)),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "ispunct",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "ispunct", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(
0U, WithinRange,
{{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
@@ -1070,8 +1177,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
{{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isspace",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isspace", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
// Space, '\f', '\n', '\r', '\t', '\v'.
.Case({ArgumentCondition(0U, WithinRange, {{9, 13}, {' ', ' '}}),
ReturnValueCondition(OutOfRange, SingleValue(0))})
@@ -1081,8 +1188,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
{{9, 13}, {' ', ' '}, {128, UCharRangeMax}}),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isupper",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isupper", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
// Is certainly uppercase.
.Case({ArgumentCondition(0U, WithinRange, Range('A', 'Z')),
ReturnValueCondition(OutOfRange, SingleValue(0))})
@@ -1093,650 +1200,1290 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
{{'A', 'Z'}, {128, UCharRangeMax}}),
ReturnValueCondition(WithinRange, SingleValue(0))}));
addToFunctionSummaryMap(
- "isxdigit",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "isxdigit", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange,
{{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
ReturnValueCondition(OutOfRange, SingleValue(0))})
.Case({ArgumentCondition(0U, OutOfRange,
{{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "toupper", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ addToFunctionSummaryMap(
+ "tolower", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ addToFunctionSummaryMap(
+ "toascii", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
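+  // Note that toupper(), tolower() and toascii() above only constrain the
+  // argument's domain (the same precondition as for the is*() functions);
+  // no return-value cases are modeled for them here.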
// The getc() family of functions that returns either a char or EOF.
- if (FilePtrTy) {
- addToFunctionSummaryMap("getc", Getc());
- addToFunctionSummaryMap("fgetc", Getc());
- }
addToFunctionSummaryMap(
- "getchar", Summary(ArgTypes{}, RetType{IntTy}, NoEvalCall)
- .Case({ReturnValueCondition(
- WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+ {"getc", "fgetc"}, Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+ addToFunctionSummaryMap(
+ "getchar", Signature(ArgTypes{}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}})}));
// read()-like functions that never return more than the buffer size.
- if (FilePtrRestrictTy) {
- addToFunctionSummaryMap("fread", Fread());
- addToFunctionSummaryMap("fwrite", Fwrite());
- }
+ auto FreadSummary =
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(0, SizeMax))})
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(3)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1),
+ /*BufSizeMultiplier=*/ArgNo(2)));
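+  // A sketch of what the BufferSize constraint above encodes: the buffer
+  // passed as argument 0 must hold at least `size * nitems` bytes, i.e.
+  // argument 1 multiplied by argument 2.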
+
+ // size_t fread(void *restrict ptr, size_t size, size_t nitems,
+ // FILE *restrict stream);
+ addToFunctionSummaryMap(
+ "fread",
+ Signature(ArgTypes{VoidPtrRestrictTy, SizeTy, SizeTy, FilePtrRestrictTy},
+ RetType{SizeTy}),
+ FreadSummary);
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems,
+ // FILE *restrict stream);
+ addToFunctionSummaryMap("fwrite",
+ Signature(ArgTypes{ConstVoidPtrRestrictTy, SizeTy,
+ SizeTy, FilePtrRestrictTy},
+ RetType{SizeTy}),
+ FreadSummary);
+
+ Optional<QualType> Ssize_tTy = lookupTy("ssize_t");
+ Optional<RangeInt> Ssize_tMax = getMaxValue(Ssize_tTy);
+
+ auto ReadSummary =
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))});
- // We are not sure how ssize_t is defined on every platform, so we
- // provide three variants that should cover common cases.
// FIXME these are actually defined by POSIX and not by the C standard; we
// should handle them together with the rest of the POSIX functions.
- addToFunctionSummaryMap("read", {Read(IntTy, IntMax), Read(LongTy, LongMax),
- Read(LongLongTy, LongLongMax)});
- addToFunctionSummaryMap("write", {Read(IntTy, IntMax), Read(LongTy, LongMax),
- Read(LongLongTy, LongLongMax)});
+ // ssize_t read(int fildes, void *buf, size_t nbyte);
+ addToFunctionSummaryMap(
+ "read", Signature(ArgTypes{IntTy, VoidPtrTy, SizeTy}, RetType{Ssize_tTy}),
+ ReadSummary);
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ addToFunctionSummaryMap(
+ "write",
+ Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy}, RetType{Ssize_tTy}),
+ ReadSummary);
+
+ auto GetLineSummary =
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange,
+ Range({-1, -1}, {1, Ssize_tMax}))});
+
+ QualType CharPtrPtrRestrictTy = getRestrictTy(getPointerTy(CharPtrTy));
// getline()-like functions either fail or read at least the delimiter.
// FIXME these are actually defined by POSIX and not by the C standard; we
// should handle them together with the rest of the POSIX functions.
- addToFunctionSummaryMap("getline",
- {Getline(IntTy, IntMax), Getline(LongTy, LongMax),
- Getline(LongLongTy, LongLongMax)});
- addToFunctionSummaryMap("getdelim",
- {Getline(IntTy, IntMax), Getline(LongTy, LongMax),
- Getline(LongLongTy, LongLongMax)});
+ // ssize_t getline(char **restrict lineptr, size_t *restrict n,
+ // FILE *restrict stream);
+ addToFunctionSummaryMap(
+ "getline",
+ Signature(
+ ArgTypes{CharPtrPtrRestrictTy, SizePtrRestrictTy, FilePtrRestrictTy},
+ RetType{Ssize_tTy}),
+ GetLineSummary);
+ // ssize_t getdelim(char **restrict lineptr, size_t *restrict n,
+ // int delimiter, FILE *restrict stream);
+ addToFunctionSummaryMap(
+ "getdelim",
+ Signature(ArgTypes{CharPtrPtrRestrictTy, SizePtrRestrictTy, IntTy,
+ FilePtrRestrictTy},
+ RetType{Ssize_tTy}),
+ GetLineSummary);
if (ModelPOSIX) {
// long a64l(const char *str64);
addToFunctionSummaryMap(
- "a64l", Summary(ArgTypes{ConstCharPtrTy}, RetType{LongTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ "a64l", Signature(ArgTypes{ConstCharPtrTy}, RetType{LongTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// char *l64a(long value);
- addToFunctionSummaryMap(
- "l64a", Summary(ArgTypes{LongTy}, RetType{CharPtrTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, LongMax))));
+ addToFunctionSummaryMap("l64a",
+ Signature(ArgTypes{LongTy}, RetType{CharPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(0, LongMax))));
+
+ const auto ReturnsZeroOrMinusOne =
+ ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, 0))};
+ const auto ReturnsFileDescriptor =
+ ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, IntMax))};
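+    // These shared ConstraintSets capture the usual POSIX return conventions
+    // ("0 on success, -1 on error" and "a nonnegative descriptor on success,
+    // -1 on error"), so the summaries below can state them with a single
+    // .Case(ReturnsZeroOrMinusOne) or .Case(ReturnsFileDescriptor).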
// int access(const char *pathname, int amode);
- addToFunctionSummaryMap("access", Summary(ArgTypes{ConstCharPtrTy, IntTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "access", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int faccessat(int dirfd, const char *pathname, int mode, int flags);
addToFunctionSummaryMap(
- "faccessat", Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy, IntTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(1))));
+ "faccessat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(1))));
// int dup(int fildes);
- addToFunctionSummaryMap(
- "dup", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("dup", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsFileDescriptor)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(0, IntMax))));
// int dup2(int fildes1, int filedes2);
addToFunctionSummaryMap(
- "dup2",
- Summary(ArgTypes{IntTy, IntTy}, RetType{IntTy}, NoEvalCall)
+ "dup2", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsFileDescriptor)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
ArgumentCondition(1, WithinRange, Range(0, IntMax))));
// int fdatasync(int fildes);
- addToFunctionSummaryMap(
- "fdatasync", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange,
- Range(0, IntMax))));
+ addToFunctionSummaryMap("fdatasync",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(0, IntMax))));
// int fnmatch(const char *pattern, const char *string, int flags);
addToFunctionSummaryMap(
- "fnmatch", Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, IntTy},
- RetType{IntTy}, EvalCallAsPure)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ "fnmatch",
+ Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, IntTy},
+ RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
// int fsync(int fildes);
- addToFunctionSummaryMap(
- "fsync", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("fsync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(0, IntMax))));
- Optional<QualType> Off_tTy = lookupType("off_t", ACtx);
+ Optional<QualType> Off_tTy = lookupTy("off_t");
- if (Off_tTy)
- // int truncate(const char *path, off_t length);
- addToFunctionSummaryMap("truncate",
- Summary(ArgTypes{ConstCharPtrTy, *Off_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int truncate(const char *path, off_t length);
+ addToFunctionSummaryMap(
+ "truncate",
+ Signature(ArgTypes{ConstCharPtrTy, Off_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int symlink(const char *oldpath, const char *newpath);
- addToFunctionSummaryMap("symlink",
- Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ addToFunctionSummaryMap(
+ "symlink",
+ Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
// int symlinkat(const char *oldpath, int newdirfd, const char *newpath);
addToFunctionSummaryMap(
"symlinkat",
- Summary(ArgTypes{ConstCharPtrTy, IntTy, ConstCharPtrTy}, RetType{IntTy},
- NoEvalCall)
+ Signature(ArgTypes{ConstCharPtrTy, IntTy, ConstCharPtrTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(ArgumentCondition(1, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(2))));
- if (Off_tTy)
- // int lockf(int fd, int cmd, off_t len);
- addToFunctionSummaryMap(
- "lockf",
- Summary(ArgTypes{IntTy, IntTy, *Off_tTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ // int lockf(int fd, int cmd, off_t len);
+ addToFunctionSummaryMap(
+ "lockf", Signature(ArgTypes{IntTy, IntTy, Off_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- Optional<QualType> Mode_tTy = lookupType("mode_t", ACtx);
+ Optional<QualType> Mode_tTy = lookupTy("mode_t");
- if (Mode_tTy)
- // int creat(const char *pathname, mode_t mode);
- addToFunctionSummaryMap("creat",
- Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int creat(const char *pathname, mode_t mode);
+ addToFunctionSummaryMap(
+ "creat", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsFileDescriptor)
+ .ArgConstraint(NotNull(ArgNo(0))));
// unsigned int sleep(unsigned int seconds);
addToFunctionSummaryMap(
- "sleep",
- Summary(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}, NoEvalCall)
+ "sleep", Signature(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}),
+ Summary(NoEvalCall)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
- Optional<QualType> DirTy = lookupType("DIR", ACtx);
- Optional<QualType> DirPtrTy;
- if (DirTy)
- DirPtrTy = ACtx.getPointerType(*DirTy);
+ Optional<QualType> DirTy = lookupTy("DIR");
+ Optional<QualType> DirPtrTy = getPointerTy(DirTy);
- if (DirPtrTy)
- // int dirfd(DIR *dirp);
- addToFunctionSummaryMap(
- "dirfd", Summary(ArgTypes{*DirPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int dirfd(DIR *dirp);
+ addToFunctionSummaryMap("dirfd",
+ Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsFileDescriptor)
+ .ArgConstraint(NotNull(ArgNo(0))));
// unsigned int alarm(unsigned int seconds);
addToFunctionSummaryMap(
- "alarm",
- Summary(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}, NoEvalCall)
+ "alarm", Signature(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}),
+ Summary(NoEvalCall)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
- if (DirPtrTy)
- // int closedir(DIR *dir);
- addToFunctionSummaryMap(
- "closedir", Summary(ArgTypes{*DirPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int closedir(DIR *dir);
+ addToFunctionSummaryMap("closedir",
+ Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
// char *strdup(const char *s);
- addToFunctionSummaryMap("strdup", Summary(ArgTypes{ConstCharPtrTy},
- RetType{CharPtrTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "strdup", Signature(ArgTypes{ConstCharPtrTy}, RetType{CharPtrTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// char *strndup(const char *s, size_t n);
addToFunctionSummaryMap(
- "strndup", Summary(ArgTypes{ConstCharPtrTy, SizeTy}, RetType{CharPtrTy},
- NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(ArgumentCondition(1, WithinRange,
- Range(0, SizeMax))));
+ "strndup",
+ Signature(ArgTypes{ConstCharPtrTy, SizeTy}, RetType{CharPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(0, SizeMax))));
// wchar_t *wcsdup(const wchar_t *s);
- addToFunctionSummaryMap("wcsdup", Summary(ArgTypes{ConstWchar_tPtrTy},
- RetType{Wchar_tPtrTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "wcsdup", Signature(ArgTypes{ConstWchar_tPtrTy}, RetType{Wchar_tPtrTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int mkstemp(char *template);
- addToFunctionSummaryMap(
- "mkstemp", Summary(ArgTypes{CharPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap("mkstemp",
+ Signature(ArgTypes{CharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsFileDescriptor)
+ .ArgConstraint(NotNull(ArgNo(0))));
// char *mkdtemp(char *template);
addToFunctionSummaryMap(
- "mkdtemp", Summary(ArgTypes{CharPtrTy}, RetType{CharPtrTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ "mkdtemp", Signature(ArgTypes{CharPtrTy}, RetType{CharPtrTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// char *getcwd(char *buf, size_t size);
addToFunctionSummaryMap(
- "getcwd",
- Summary(ArgTypes{CharPtrTy, SizeTy}, RetType{CharPtrTy}, NoEvalCall)
+ "getcwd", Signature(ArgTypes{CharPtrTy, SizeTy}, RetType{CharPtrTy}),
+ Summary(NoEvalCall)
.ArgConstraint(
ArgumentCondition(1, WithinRange, Range(0, SizeMax))));
- if (Mode_tTy) {
- // int mkdir(const char *pathname, mode_t mode);
- addToFunctionSummaryMap("mkdir",
- Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int mkdir(const char *pathname, mode_t mode);
+ addToFunctionSummaryMap(
+ "mkdir", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
- // int mkdirat(int dirfd, const char *pathname, mode_t mode);
- addToFunctionSummaryMap(
- "mkdirat", Summary(ArgTypes{IntTy, ConstCharPtrTy, *Mode_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(1))));
- }
+ // int mkdirat(int dirfd, const char *pathname, mode_t mode);
+ addToFunctionSummaryMap(
+ "mkdirat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> Dev_tTy = lookupType("dev_t", ACtx);
+ Optional<QualType> Dev_tTy = lookupTy("dev_t");
- if (Mode_tTy && Dev_tTy) {
- // int mknod(const char *pathname, mode_t mode, dev_t dev);
- addToFunctionSummaryMap(
- "mknod", Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy, *Dev_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
-
- // int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev);
- addToFunctionSummaryMap("mknodat", Summary(ArgTypes{IntTy, ConstCharPtrTy,
- *Mode_tTy, *Dev_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(1))));
- }
+ // int mknod(const char *pathname, mode_t mode, dev_t dev);
+ addToFunctionSummaryMap(
+ "mknod",
+ Signature(ArgTypes{ConstCharPtrTy, Mode_tTy, Dev_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
- if (Mode_tTy) {
- // int chmod(const char *path, mode_t mode);
- addToFunctionSummaryMap("chmod",
- Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev);
+ addToFunctionSummaryMap(
+ "mknodat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, Dev_tTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(1))));
- // int fchmodat(int dirfd, const char *pathname, mode_t mode, int flags);
- addToFunctionSummaryMap(
- "fchmodat", Summary(ArgTypes{IntTy, ConstCharPtrTy, *Mode_tTy, IntTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange,
- Range(0, IntMax)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ // int chmod(const char *path, mode_t mode);
+ addToFunctionSummaryMap(
+ "chmod", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
- // int fchmod(int fildes, mode_t mode);
- addToFunctionSummaryMap(
- "fchmod",
- Summary(ArgTypes{IntTy, *Mode_tTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- }
+ // int fchmodat(int dirfd, const char *pathname, mode_t mode, int flags);
+ addToFunctionSummaryMap(
+ "fchmodat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> Uid_tTy = lookupType("uid_t", ACtx);
- Optional<QualType> Gid_tTy = lookupType("gid_t", ACtx);
+ // int fchmod(int fildes, mode_t mode);
+ addToFunctionSummaryMap(
+ "fchmod", Signature(ArgTypes{IntTy, Mode_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- if (Uid_tTy && Gid_tTy) {
- // int fchownat(int dirfd, const char *pathname, uid_t owner, gid_t group,
- // int flags);
- addToFunctionSummaryMap(
- "fchownat",
- Summary(ArgTypes{IntTy, ConstCharPtrTy, *Uid_tTy, *Gid_tTy, IntTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ Optional<QualType> Uid_tTy = lookupTy("uid_t");
+ Optional<QualType> Gid_tTy = lookupTy("gid_t");
- // int chown(const char *path, uid_t owner, gid_t group);
- addToFunctionSummaryMap(
- "chown", Summary(ArgTypes{ConstCharPtrTy, *Uid_tTy, *Gid_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int fchownat(int dirfd, const char *pathname, uid_t owner, gid_t group,
+ // int flags);
+ addToFunctionSummaryMap(
+ "fchownat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, Uid_tTy, Gid_tTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
- // int lchown(const char *path, uid_t owner, gid_t group);
- addToFunctionSummaryMap(
- "lchown", Summary(ArgTypes{ConstCharPtrTy, *Uid_tTy, *Gid_tTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int chown(const char *path, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "chown",
+ Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
- // int fchown(int fildes, uid_t owner, gid_t group);
- addToFunctionSummaryMap(
- "fchown", Summary(ArgTypes{IntTy, *Uid_tTy, *Gid_tTy}, RetType{IntTy},
- NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange,
- Range(0, IntMax))));
- }
+ // int lchown(const char *path, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "lchown",
+ Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
- // int rmdir(const char *pathname);
+ // int fchown(int fildes, uid_t owner, gid_t group);
addToFunctionSummaryMap(
- "rmdir", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ "fchown", Signature(ArgTypes{IntTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // int rmdir(const char *pathname);
+ addToFunctionSummaryMap("rmdir",
+ Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int chdir(const char *path);
- addToFunctionSummaryMap(
- "chdir", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap("chdir",
+ Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int link(const char *oldpath, const char *newpath);
- addToFunctionSummaryMap("link",
- Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ addToFunctionSummaryMap(
+ "link",
+ Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
// int linkat(int fd1, const char *path1, int fd2, const char *path2,
// int flag);
addToFunctionSummaryMap(
"linkat",
- Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy, IntTy},
- RetType{IntTy}, NoEvalCall)
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(ArgumentCondition(2, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(3))));
// int unlink(const char *pathname);
- addToFunctionSummaryMap(
- "unlink", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap("unlink",
+ Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int unlinkat(int fd, const char *path, int flag);
addToFunctionSummaryMap(
"unlinkat",
- Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy},
- NoEvalCall)
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> StructStatTy = lookupType("stat", ACtx);
- Optional<QualType> StructStatPtrTy, StructStatPtrRestrictTy;
- if (StructStatTy) {
- StructStatPtrTy = ACtx.getPointerType(*StructStatTy);
- StructStatPtrRestrictTy = ACtx.getLangOpts().C99
- ? ACtx.getRestrictType(*StructStatPtrTy)
- : *StructStatPtrTy;
- }
+ Optional<QualType> StructStatTy = lookupTy("stat");
+ Optional<QualType> StructStatPtrTy = getPointerTy(StructStatTy);
+ Optional<QualType> StructStatPtrRestrictTy = getRestrictTy(StructStatPtrTy);
- if (StructStatPtrTy)
- // int fstat(int fd, struct stat *statbuf);
- addToFunctionSummaryMap(
- "fstat",
- Summary(ArgTypes{IntTy, *StructStatPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ // int fstat(int fd, struct stat *statbuf);
+ addToFunctionSummaryMap(
+ "fstat", Signature(ArgTypes{IntTy, StructStatPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
- if (StructStatPtrRestrictTy) {
- // int stat(const char *restrict path, struct stat *restrict buf);
- addToFunctionSummaryMap(
- "stat",
- Summary(ArgTypes{ConstCharPtrRestrictTy, *StructStatPtrRestrictTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ // int stat(const char *restrict path, struct stat *restrict buf);
+ addToFunctionSummaryMap(
+ "stat",
+ Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
- // int lstat(const char *restrict path, struct stat *restrict buf);
- addToFunctionSummaryMap(
- "lstat",
- Summary(ArgTypes{ConstCharPtrRestrictTy, *StructStatPtrRestrictTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
-
- // int fstatat(int fd, const char *restrict path,
- // struct stat *restrict buf, int flag);
- addToFunctionSummaryMap(
- "fstatat", Summary(ArgTypes{IntTy, ConstCharPtrRestrictTy,
- *StructStatPtrRestrictTy, IntTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange,
- Range(0, IntMax)))
- .ArgConstraint(NotNull(ArgNo(1)))
- .ArgConstraint(NotNull(ArgNo(2))));
- }
+ // int lstat(const char *restrict path, struct stat *restrict buf);
+ addToFunctionSummaryMap(
+ "lstat",
+ Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
- if (DirPtrTy) {
- // DIR *opendir(const char *name);
- addToFunctionSummaryMap("opendir", Summary(ArgTypes{ConstCharPtrTy},
- RetType{*DirPtrTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int fstatat(int fd, const char *restrict path,
+ // struct stat *restrict buf, int flag);
+ addToFunctionSummaryMap(
+ "fstatat",
+ Signature(ArgTypes{IntTy, ConstCharPtrRestrictTy,
+ StructStatPtrRestrictTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
- // DIR *fdopendir(int fd);
- addToFunctionSummaryMap(
- "fdopendir", Summary(ArgTypes{IntTy}, RetType{*DirPtrTy}, NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange,
- Range(0, IntMax))));
- }
+ // DIR *opendir(const char *name);
+ addToFunctionSummaryMap(
+ "opendir", Signature(ArgTypes{ConstCharPtrTy}, RetType{DirPtrTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // DIR *fdopendir(int fd);
+ addToFunctionSummaryMap("fdopendir",
+ Signature(ArgTypes{IntTy}, RetType{DirPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(0, IntMax))));
// int isatty(int fildes);
addToFunctionSummaryMap(
- "isatty", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ "isatty", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, Range(0, 1))})
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- if (FilePtrTy) {
- // FILE *popen(const char *command, const char *type);
- addToFunctionSummaryMap("popen",
- Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
- RetType{*FilePtrTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ // FILE *popen(const char *command, const char *type);
+ addToFunctionSummaryMap(
+ "popen",
+ Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
- // int pclose(FILE *stream);
- addToFunctionSummaryMap(
- "pclose", Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
- }
+ // int pclose(FILE *stream);
+ addToFunctionSummaryMap(
+ "pclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int close(int fildes);
- addToFunctionSummaryMap(
- "close", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("close", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(-1, IntMax))));
// long fpathconf(int fildes, int name);
- addToFunctionSummaryMap(
- "fpathconf",
- Summary(ArgTypes{IntTy, IntTy}, RetType{LongTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("fpathconf",
+ Signature(ArgTypes{IntTy, IntTy}, RetType{LongTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(0, IntMax))));
// long pathconf(const char *path, int name);
- addToFunctionSummaryMap("pathconf", Summary(ArgTypes{ConstCharPtrTy, IntTy},
- RetType{LongTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "pathconf", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{LongTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
- if (FilePtrTy)
- // FILE *fdopen(int fd, const char *mode);
- addToFunctionSummaryMap(
- "fdopen", Summary(ArgTypes{IntTy, ConstCharPtrTy},
- RetType{*FilePtrTy}, NoEvalCall)
- .ArgConstraint(
- ArgumentCondition(0, WithinRange, Range(0, IntMax)))
- .ArgConstraint(NotNull(ArgNo(1))));
-
- if (DirPtrTy) {
- // void rewinddir(DIR *dir);
- addToFunctionSummaryMap(
- "rewinddir", Summary(ArgTypes{*DirPtrTy}, RetType{VoidTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // FILE *fdopen(int fd, const char *mode);
+ addToFunctionSummaryMap(
+ "fdopen",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy}, RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
- // void seekdir(DIR *dirp, long loc);
- addToFunctionSummaryMap("seekdir", Summary(ArgTypes{*DirPtrTy, LongTy},
- RetType{VoidTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
- }
+ // void rewinddir(DIR *dir);
+ addToFunctionSummaryMap(
+ "rewinddir", Signature(ArgTypes{DirPtrTy}, RetType{VoidTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // void seekdir(DIR *dirp, long loc);
+ addToFunctionSummaryMap(
+ "seekdir", Signature(ArgTypes{DirPtrTy, LongTy}, RetType{VoidTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int rand_r(unsigned int *seedp);
- addToFunctionSummaryMap("rand_r", Summary(ArgTypes{UnsignedIntPtrTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
-
- // int strcasecmp(const char *s1, const char *s2);
- addToFunctionSummaryMap("strcasecmp",
- Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
- RetType{IntTy}, EvalCallAsPure)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ addToFunctionSummaryMap(
+ "rand_r", Signature(ArgTypes{UnsignedIntPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fileno(FILE *stream);
+ addToFunctionSummaryMap("fileno",
+ Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsFileDescriptor)
+ .ArgConstraint(NotNull(ArgNo(0))));
- // int strncasecmp(const char *s1, const char *s2, size_t n);
+ // int fseeko(FILE *stream, off_t offset, int whence);
addToFunctionSummaryMap(
- "strncasecmp", Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, SizeTy},
- RetType{IntTy}, EvalCallAsPure)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1)))
- .ArgConstraint(ArgumentCondition(
- 2, WithinRange, Range(0, SizeMax))));
+ "fseeko",
+ Signature(ArgTypes{FilePtrTy, Off_tTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
- if (FilePtrTy && Off_tTy) {
+ // off_t ftello(FILE *stream);
+ addToFunctionSummaryMap(
+ "ftello", Signature(ArgTypes{FilePtrTy}, RetType{Off_tTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
- // int fileno(FILE *stream);
- addToFunctionSummaryMap(
- "fileno", Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ // off_t offset);
+ addToFunctionSummaryMap(
+ "mmap",
+ Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off_tTy},
+ RetType{VoidPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+ .ArgConstraint(
+ ArgumentCondition(4, WithinRange, Range(-1, IntMax))));
- // int fseeko(FILE *stream, off_t offset, int whence);
- addToFunctionSummaryMap("fseeko",
- Summary(ArgTypes{*FilePtrTy, *Off_tTy, IntTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ Optional<QualType> Off64_tTy = lookupTy("off64_t");
+ // void *mmap64(void *addr, size_t length, int prot, int flags, int fd,
+ // off64_t offset);
+ addToFunctionSummaryMap(
+ "mmap64",
+ Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off64_tTy},
+ RetType{VoidPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+ .ArgConstraint(
+ ArgumentCondition(4, WithinRange, Range(-1, IntMax))));
- // off_t ftello(FILE *stream);
- addToFunctionSummaryMap(
- "ftello", Summary(ArgTypes{*FilePtrTy}, RetType{*Off_tTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
- }
+ // int pipe(int fildes[2]);
+ addToFunctionSummaryMap("pipe",
+ Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // off_t lseek(int fildes, off_t offset, int whence);
+ addToFunctionSummaryMap(
+ "lseek", Signature(ArgTypes{IntTy, Off_tTy, IntTy}, RetType{Off_tTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // ssize_t readlink(const char *restrict path, char *restrict buf,
+ // size_t bufsize);
+ addToFunctionSummaryMap(
+ "readlink",
+ Signature(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy},
+ RetType{Ssize_tTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*BufSize=*/ArgNo(2)))
+ .ArgConstraint(
+ ArgumentCondition(2, WithinRange, Range(0, SizeMax))));
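+      // Here BufferSize has no multiplier: buf (argument 1) must hold at
+      // least bufsize (argument 2) bytes.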
+
+ // ssize_t readlinkat(int fd, const char *restrict path,
+ // char *restrict buf, size_t bufsize);
+ addToFunctionSummaryMap(
+ "readlinkat",
+ Signature(
+ ArgTypes{IntTy, ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy},
+ RetType{Ssize_tTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(3)),
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(2),
+ /*BufSize=*/ArgNo(3)))
+ .ArgConstraint(
+ ArgumentCondition(3, WithinRange, Range(0, SizeMax))));
+
+ // int renameat(int olddirfd, const char *oldpath, int newdirfd, const char
+ // *newpath);
+ addToFunctionSummaryMap(
+ "renameat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(3))));
+
+ // char *realpath(const char *restrict file_name,
+ // char *restrict resolved_name);
+ addToFunctionSummaryMap(
+ "realpath",
+ Signature(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy},
+ RetType{CharPtrTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ QualType CharPtrConstPtr = getPointerTy(getConstTy(CharPtrTy));
+
+ // int execv(const char *path, char *const argv[]);
+ addToFunctionSummaryMap(
+ "execv",
+ Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(-1))})
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int execvp(const char *file, char *const argv[]);
+ addToFunctionSummaryMap(
+ "execvp",
+ Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(-1))})
+ .ArgConstraint(NotNull(ArgNo(0))));
- if (Off_tTy) {
- Optional<RangeInt> Off_tMax = BVF.getMaxValue(*Off_tTy).getLimitedValue();
+ // int getopt(int argc, char * const argv[], const char *optstring);
+ addToFunctionSummaryMap(
+ "getopt",
+ Signature(ArgTypes{IntTy, CharPtrConstPtr, ConstCharPtrTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, Range(-1, UCharRangeMax))})
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
- // void *mmap(void *addr, size_t length, int prot, int flags, int fd,
- // off_t offset);
+ Optional<QualType> StructSockaddrTy = lookupTy("sockaddr");
+ Optional<QualType> StructSockaddrPtrTy = getPointerTy(StructSockaddrTy);
+ Optional<QualType> ConstStructSockaddrPtrTy =
+ getPointerTy(getConstTy(StructSockaddrTy));
+ Optional<QualType> StructSockaddrPtrRestrictTy =
+ getRestrictTy(StructSockaddrPtrTy);
+ Optional<QualType> ConstStructSockaddrPtrRestrictTy =
+ getRestrictTy(ConstStructSockaddrPtrTy);
+ Optional<QualType> Socklen_tTy = lookupTy("socklen_t");
+ Optional<QualType> Socklen_tPtrTy = getPointerTy(Socklen_tTy);
+ Optional<QualType> Socklen_tPtrRestrictTy = getRestrictTy(Socklen_tPtrTy);
+ Optional<RangeInt> Socklen_tMax = getMaxValue(Socklen_tTy);
+
+    // In 'socket.h' of some libc implementations with C99, the sockaddr
+    // parameter is a transparent union of the underlying sockaddr_ family of
+    // pointers instead of being a pointer to struct sockaddr. In these cases,
+    // the standardized signature will not match; we therefore try to match
+    // with another signature that has the joker Irrelevant type. We also
+    // remove those constraints which require pointer types for the sockaddr
+    // param.
+ auto Accept =
+ Summary(NoEvalCall)
+ .Case(ReturnsFileDescriptor)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)));
+ if (!addToFunctionSummaryMap(
+ "accept",
+ // int accept(int socket, struct sockaddr *restrict address,
+ // socklen_t *restrict address_len);
+ Signature(ArgTypes{IntTy, StructSockaddrPtrRestrictTy,
+ Socklen_tPtrRestrictTy},
+ RetType{IntTy}),
+ Accept))
+ addToFunctionSummaryMap(
+ "accept",
+ Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
+ RetType{IntTy}),
+ Accept);
+
+ // int bind(int socket, const struct sockaddr *address, socklen_t
+ // address_len);
+ if (!addToFunctionSummaryMap(
+ "bind",
+ Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(1), /*BufSize=*/ArgNo(2)))
+ .ArgConstraint(
+ ArgumentCondition(2, WithinRange, Range(0, Socklen_tMax)))))
+ // Do not add constraints on sockaddr.
addToFunctionSummaryMap(
- "mmap",
- Summary(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, *Off_tTy},
- RetType{VoidPtrTy}, NoEvalCall)
+ "bind",
+ Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
.ArgConstraint(
- ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
- ArgumentCondition(4, WithinRange, Range(0, *Off_tMax))));
- }
+ ArgumentCondition(2, WithinRange, Range(0, Socklen_tMax))));
+
+ // int getpeername(int socket, struct sockaddr *restrict address,
+ // socklen_t *restrict address_len);
+ if (!addToFunctionSummaryMap(
+ "getpeername",
+ Signature(ArgTypes{IntTy, StructSockaddrPtrRestrictTy,
+ Socklen_tPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2)))))
+ addToFunctionSummaryMap(
+ "getpeername",
+ Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- Optional<QualType> Off64_tTy = lookupType("off64_t", ACtx);
- Optional<RangeInt> Off64_tMax;
- if (Off64_tTy) {
- Off64_tMax = BVF.getMaxValue(*Off_tTy).getLimitedValue();
- // void *mmap64(void *addr, size_t length, int prot, int flags, int fd,
- // off64_t offset);
+ // int getsockname(int socket, struct sockaddr *restrict address,
+ // socklen_t *restrict address_len);
+ if (!addToFunctionSummaryMap(
+ "getsockname",
+ Signature(ArgTypes{IntTy, StructSockaddrPtrRestrictTy,
+ Socklen_tPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2)))))
addToFunctionSummaryMap(
- "mmap64",
- Summary(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, *Off64_tTy},
- RetType{VoidPtrTy}, NoEvalCall)
+ "getsockname",
+ Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
.ArgConstraint(
- ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // int connect(int socket, const struct sockaddr *address, socklen_t
+ // address_len);
+ if (!addToFunctionSummaryMap(
+ "connect",
+ Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))))
+ addToFunctionSummaryMap(
+ "connect",
+ Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
.ArgConstraint(
- ArgumentCondition(4, WithinRange, Range(0, *Off64_tMax))));
- }
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- // int pipe(int fildes[2]);
+ auto Recvfrom =
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*BufSize=*/ArgNo(2)));
+ if (!addToFunctionSummaryMap(
+ "recvfrom",
+ // ssize_t recvfrom(int socket, void *restrict buffer,
+ // size_t length,
+ // int flags, struct sockaddr *restrict address,
+ // socklen_t *restrict address_len);
+ Signature(ArgTypes{IntTy, VoidPtrRestrictTy, SizeTy, IntTy,
+ StructSockaddrPtrRestrictTy,
+ Socklen_tPtrRestrictTy},
+ RetType{Ssize_tTy}),
+ Recvfrom))
+ addToFunctionSummaryMap(
+ "recvfrom",
+ Signature(ArgTypes{IntTy, VoidPtrRestrictTy, SizeTy, IntTy,
+ Irrelevant, Socklen_tPtrRestrictTy},
+ RetType{Ssize_tTy}),
+ Recvfrom);
+
+ auto Sendto =
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*BufSize=*/ArgNo(2)));
+ if (!addToFunctionSummaryMap(
+ "sendto",
+ // ssize_t sendto(int socket, const void *message, size_t length,
+ // int flags, const struct sockaddr *dest_addr,
+ // socklen_t dest_len);
+ Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy, IntTy,
+ ConstStructSockaddrPtrTy, Socklen_tTy},
+ RetType{Ssize_tTy}),
+ Sendto))
+ addToFunctionSummaryMap(
+ "sendto",
+ Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy, IntTy, Irrelevant,
+ Socklen_tTy},
+ RetType{Ssize_tTy}),
+ Sendto);
+
+ // int listen(int sockfd, int backlog);
+ addToFunctionSummaryMap("listen",
+ Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, Range(0, IntMax))));
+
+ // ssize_t recv(int sockfd, void *buf, size_t len, int flags);
addToFunctionSummaryMap(
- "pipe", Summary(ArgTypes{IntPtrTy}, RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ "recv",
+ Signature(ArgTypes{IntTy, VoidPtrTy, SizeTy, IntTy},
+ RetType{Ssize_tTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*BufSize=*/ArgNo(2))));
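+  // (The two ReturnValueConditions above encode that recv() returns at most
+  // 'len' (arg 2) bytes and always a value within [-1, SSIZE_MAX].)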
- if (Off_tTy)
- // off_t lseek(int fildes, off_t offset, int whence);
- addToFunctionSummaryMap(
- "lseek", Summary(ArgTypes{IntTy, *Off_tTy, IntTy}, RetType{*Off_tTy},
- NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange,
- Range(0, IntMax))));
+ Optional<QualType> StructMsghdrTy = lookupTy("msghdr");
+ Optional<QualType> StructMsghdrPtrTy = getPointerTy(StructMsghdrTy);
+ Optional<QualType> ConstStructMsghdrPtrTy =
+ getPointerTy(getConstTy(StructMsghdrTy));
- Optional<QualType> Ssize_tTy = lookupType("ssize_t", ACtx);
+ // ssize_t recvmsg(int sockfd, struct msghdr *msg, int flags);
+ addToFunctionSummaryMap(
+ "recvmsg",
+ Signature(ArgTypes{IntTy, StructMsghdrPtrTy, IntTy},
+ RetType{Ssize_tTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- if (Ssize_tTy) {
- // ssize_t readlink(const char *restrict path, char *restrict buf,
- // size_t bufsize);
- addToFunctionSummaryMap(
- "readlink",
- Summary(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy},
- RetType{*Ssize_tTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1)))
- .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
- /*BufSize=*/ArgNo(2)))
- .ArgConstraint(
- ArgumentCondition(2, WithinRange, Range(0, SizeMax))));
+ // ssize_t sendmsg(int sockfd, const struct msghdr *msg, int flags);
+ addToFunctionSummaryMap(
+ "sendmsg",
+ Signature(ArgTypes{IntTy, ConstStructMsghdrPtrTy, IntTy},
+ RetType{Ssize_tTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- // ssize_t readlinkat(int fd, const char *restrict path,
- // char *restrict buf, size_t bufsize);
- addToFunctionSummaryMap(
- "readlinkat", Summary(ArgTypes{IntTy, ConstCharPtrRestrictTy,
- CharPtrRestrictTy, SizeTy},
- RetType{*Ssize_tTy}, NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange,
- Range(0, IntMax)))
- .ArgConstraint(NotNull(ArgNo(1)))
- .ArgConstraint(NotNull(ArgNo(2)))
- .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(2),
- /*BufSize=*/ArgNo(3)))
- .ArgConstraint(ArgumentCondition(
- 3, WithinRange, Range(0, SizeMax))));
- }
+ // int setsockopt(int socket, int level, int option_name,
+ // const void *option_value, socklen_t option_len);
+ addToFunctionSummaryMap(
+ "setsockopt",
+ Signature(ArgTypes{IntTy, IntTy, IntTy, ConstVoidPtrTy, Socklen_tTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(3)))
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(3), /*BufSize=*/ArgNo(4)))
+ .ArgConstraint(
+ ArgumentCondition(4, WithinRange, Range(0, Socklen_tMax))));
- // int renameat(int olddirfd, const char *oldpath, int newdirfd, const char
- // *newpath);
- addToFunctionSummaryMap("renameat", Summary(ArgTypes{IntTy, ConstCharPtrTy,
- IntTy, ConstCharPtrTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(1)))
- .ArgConstraint(NotNull(ArgNo(3))));
+ // int getsockopt(int socket, int level, int option_name,
+ // void *restrict option_value,
+ // socklen_t *restrict option_len);
+ addToFunctionSummaryMap(
+ "getsockopt",
+ Signature(ArgTypes{IntTy, IntTy, IntTy, VoidPtrRestrictTy,
+ Socklen_tPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(3)))
+ .ArgConstraint(NotNull(ArgNo(4))));
+
+ // ssize_t send(int sockfd, const void *buf, size_t len, int flags);
+ addToFunctionSummaryMap(
+ "send",
+ Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy, IntTy},
+ RetType{Ssize_tTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*BufSize=*/ArgNo(2))));
- // char *realpath(const char *restrict file_name,
- // char *restrict resolved_name);
+ // int socketpair(int domain, int type, int protocol, int sv[2]);
+ addToFunctionSummaryMap(
+ "socketpair",
+ Signature(ArgTypes{IntTy, IntTy, IntTy, IntPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(3))));
+
+ // int getnameinfo(const struct sockaddr *restrict sa, socklen_t salen,
+ // char *restrict node, socklen_t nodelen,
+ // char *restrict service,
+ // socklen_t servicelen, int flags);
+ //
+ // This is defined in netdb.h. Unlike in 'socket.h', the sockaddr parameter
+ // is never handled as a transparent union in netdb.h.
addToFunctionSummaryMap(
- "realpath", Summary(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy},
- RetType{CharPtrTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ "getnameinfo",
+ Signature(ArgTypes{ConstStructSockaddrPtrRestrictTy, Socklen_tTy,
+ CharPtrRestrictTy, Socklen_tTy, CharPtrRestrictTy,
+ Socklen_tTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1)))
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(0, Socklen_tMax)))
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(2), /*BufSize=*/ArgNo(3)))
+ .ArgConstraint(
+ ArgumentCondition(3, WithinRange, Range(0, Socklen_tMax)))
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(4), /*BufSize=*/ArgNo(5)))
+ .ArgConstraint(
+ ArgumentCondition(5, WithinRange, Range(0, Socklen_tMax))));
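+
+  // For reference, a rough sketch of the transparent-union declaration style
+  // used by glibc's <sys/socket.h> (member names illustrative):
+  //   typedef union {
+  //     struct sockaddr *__restrict __sockaddr__;
+  //     struct sockaddr_in *__restrict __sockaddr_in__;
+  //   } __SOCKADDR_ARG __attribute__((__transparent_union__));
+  // Such a parameter does not match StructSockaddrPtrRestrictTy exactly,
+  // which is why the socket.h functions above need the relaxed fallbacks.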
- QualType CharPtrConstPtr = ACtx.getPointerType(CharPtrTy.withConst());
+ Optional<QualType> StructUtimbufTy = lookupTy("utimbuf");
+ Optional<QualType> StructUtimbufPtrTy = getPointerTy(StructUtimbufTy);
- // int execv(const char *path, char *const argv[]);
- addToFunctionSummaryMap("execv",
- Summary(ArgTypes{ConstCharPtrTy, CharPtrConstPtr},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ // int utime(const char *filename, struct utimbuf *buf);
+ addToFunctionSummaryMap(
+ "utime",
+ Signature(ArgTypes{ConstCharPtrTy, StructUtimbufPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
- // int execvp(const char *file, char *const argv[]);
- addToFunctionSummaryMap("execvp",
- Summary(ArgTypes{ConstCharPtrTy, CharPtrConstPtr},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0))));
+ Optional<QualType> StructTimespecTy = lookupTy("timespec");
+ Optional<QualType> StructTimespecPtrTy = getPointerTy(StructTimespecTy);
+ Optional<QualType> ConstStructTimespecPtrTy =
+ getPointerTy(getConstTy(StructTimespecTy));
- // int getopt(int argc, char * const argv[], const char *optstring);
+ // int futimens(int fd, const struct timespec times[2]);
addToFunctionSummaryMap(
- "getopt",
- Summary(ArgTypes{IntTy, CharPtrConstPtr, ConstCharPtrTy},
- RetType{IntTy}, NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ "futimens",
+ Signature(ArgTypes{IntTy, ConstStructTimespecPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // int utimensat(int dirfd, const char *pathname,
+ // const struct timespec times[2], int flags);
+ addToFunctionSummaryMap("utimensat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy,
+ ConstStructTimespecPtrTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ Optional<QualType> StructTimevalTy = lookupTy("timeval");
+ Optional<QualType> ConstStructTimevalPtrTy =
+ getPointerTy(getConstTy(StructTimevalTy));
+
+ // int utimes(const char *filename, const struct timeval times[2]);
+ addToFunctionSummaryMap(
+ "utimes",
+ Signature(ArgTypes{ConstCharPtrTy, ConstStructTimevalPtrTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
+ addToFunctionSummaryMap(
+ "nanosleep",
+ Signature(ArgTypes{ConstStructTimespecPtrTy, StructTimespecPtrTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ Optional<QualType> Time_tTy = lookupTy("time_t");
+ Optional<QualType> ConstTime_tPtrTy = getPointerTy(getConstTy(Time_tTy));
+ Optional<QualType> ConstTime_tPtrRestrictTy =
+ getRestrictTy(ConstTime_tPtrTy);
+
+ Optional<QualType> StructTmTy = lookupTy("tm");
+ Optional<QualType> StructTmPtrTy = getPointerTy(StructTmTy);
+ Optional<QualType> StructTmPtrRestrictTy = getRestrictTy(StructTmPtrTy);
+ Optional<QualType> ConstStructTmPtrTy =
+ getPointerTy(getConstTy(StructTmTy));
+ Optional<QualType> ConstStructTmPtrRestrictTy =
+ getRestrictTy(ConstStructTmPtrTy);
+
+ // struct tm * localtime(const time_t *tp);
+ addToFunctionSummaryMap(
+ "localtime",
+ Signature(ArgTypes{ConstTime_tPtrTy}, RetType{StructTmPtrTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // struct tm *localtime_r(const time_t *restrict timer,
+ // struct tm *restrict result);
+ addToFunctionSummaryMap(
+ "localtime_r",
+ Signature(ArgTypes{ConstTime_tPtrRestrictTy, StructTmPtrRestrictTy},
+ RetType{StructTmPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // char *asctime_r(const struct tm *restrict tm, char *restrict buf);
+ addToFunctionSummaryMap(
+ "asctime_r",
+ Signature(ArgTypes{ConstStructTmPtrRestrictTy, CharPtrRestrictTy},
+ RetType{CharPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*MinBufSize=*/BVF.getValue(26, IntTy))));
+
+ // char *ctime_r(const time_t *timep, char *buf);
+ addToFunctionSummaryMap(
+ "ctime_r",
+ Signature(ArgTypes{ConstTime_tPtrTy, CharPtrTy}, RetType{CharPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(BufferSize(
+ /*Buffer=*/ArgNo(1),
+ /*MinBufSize=*/BVF.getValue(26, IntTy))));
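+  // (The 26-byte minimum above matches the fixed asctime()-style result,
+  // "Www Mmm dd hh:mm:ss yyyy\n": 25 characters plus the terminating NUL,
+  // e.g. "Sun Sep 16 01:03:52 1973\n".)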
+
+ // struct tm *gmtime_r(const time_t *restrict timer,
+ // struct tm *restrict result);
+ addToFunctionSummaryMap(
+ "gmtime_r",
+ Signature(ArgTypes{ConstTime_tPtrRestrictTy, StructTmPtrRestrictTy},
+ RetType{StructTmPtrTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // struct tm * gmtime(const time_t *tp);
+ addToFunctionSummaryMap(
+ "gmtime", Signature(ArgTypes{ConstTime_tPtrTy}, RetType{StructTmPtrTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ Optional<QualType> Clockid_tTy = lookupTy("clockid_t");
+
+ // int clock_gettime(clockid_t clock_id, struct timespec *tp);
+ addToFunctionSummaryMap(
+ "clock_gettime",
+ Signature(ArgTypes{Clockid_tTy, StructTimespecPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ Optional<QualType> StructItimervalTy = lookupTy("itimerval");
+ Optional<QualType> StructItimervalPtrTy = getPointerTy(StructItimervalTy);
+
+ // int getitimer(int which, struct itimerval *curr_value);
+ addToFunctionSummaryMap(
+ "getitimer",
+ Signature(ArgTypes{IntTy, StructItimervalPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZeroOrMinusOne)
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ Optional<QualType> Pthread_cond_tTy = lookupTy("pthread_cond_t");
+ Optional<QualType> Pthread_cond_tPtrTy = getPointerTy(Pthread_cond_tTy);
+ Optional<QualType> Pthread_tTy = lookupTy("pthread_t");
+ Optional<QualType> Pthread_tPtrTy = getPointerTy(Pthread_tTy);
+ Optional<QualType> Pthread_tPtrRestrictTy = getRestrictTy(Pthread_tPtrTy);
+ Optional<QualType> Pthread_mutex_tTy = lookupTy("pthread_mutex_t");
+ Optional<QualType> Pthread_mutex_tPtrTy = getPointerTy(Pthread_mutex_tTy);
+ Optional<QualType> Pthread_mutex_tPtrRestrictTy =
+ getRestrictTy(Pthread_mutex_tPtrTy);
+ Optional<QualType> Pthread_attr_tTy = lookupTy("pthread_attr_t");
+ Optional<QualType> Pthread_attr_tPtrTy = getPointerTy(Pthread_attr_tTy);
+ Optional<QualType> ConstPthread_attr_tPtrTy =
+ getPointerTy(getConstTy(Pthread_attr_tTy));
+ Optional<QualType> ConstPthread_attr_tPtrRestrictTy =
+ getRestrictTy(ConstPthread_attr_tPtrTy);
+ Optional<QualType> Pthread_mutexattr_tTy = lookupTy("pthread_mutexattr_t");
+ Optional<QualType> ConstPthread_mutexattr_tPtrTy =
+ getPointerTy(getConstTy(Pthread_mutexattr_tTy));
+ Optional<QualType> ConstPthread_mutexattr_tPtrRestrictTy =
+ getRestrictTy(ConstPthread_mutexattr_tPtrTy);
+
+ QualType PthreadStartRoutineTy = getPointerTy(
+ ACtx.getFunctionType(/*ResultTy=*/VoidPtrTy, /*Args=*/VoidPtrTy,
+ FunctionProtoType::ExtProtoInfo()));
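+  // (PthreadStartRoutineTy is 'void *(*)(void *)', matching the
+  // start_routine parameter of pthread_create below.)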
+
+ // int pthread_cond_signal(pthread_cond_t *cond);
+ // int pthread_cond_broadcast(pthread_cond_t *cond);
+ addToFunctionSummaryMap(
+ {"pthread_cond_signal", "pthread_cond_broadcast"},
+ Signature(ArgTypes{Pthread_cond_tPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // int pthread_create(pthread_t *restrict thread,
+ // const pthread_attr_t *restrict attr,
+ // void *(*start_routine)(void*), void *restrict arg);
+ addToFunctionSummaryMap(
+ "pthread_create",
+ Signature(ArgTypes{Pthread_tPtrRestrictTy,
+ ConstPthread_attr_tPtrRestrictTy,
+ PthreadStartRoutineTy, VoidPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(2))));
+
+ // int pthread_attr_destroy(pthread_attr_t *attr);
+ // int pthread_attr_init(pthread_attr_t *attr);
+ addToFunctionSummaryMap(
+ {"pthread_attr_destroy", "pthread_attr_init"},
+ Signature(ArgTypes{Pthread_attr_tPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // int pthread_attr_getstacksize(const pthread_attr_t *restrict attr,
+ // size_t *restrict stacksize);
+ // int pthread_attr_getguardsize(const pthread_attr_t *restrict attr,
+ // size_t *restrict guardsize);
+ addToFunctionSummaryMap(
+ {"pthread_attr_getstacksize", "pthread_attr_getguardsize"},
+ Signature(ArgTypes{ConstPthread_attr_tPtrRestrictTy, SizePtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
+ // int pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize);
+ addToFunctionSummaryMap(
+ {"pthread_attr_setstacksize", "pthread_attr_setguardsize"},
+ Signature(ArgTypes{Pthread_attr_tPtrTy, SizeTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(0, SizeMax))));
+
+ // int pthread_mutex_init(pthread_mutex_t *restrict mutex, const
+ // pthread_mutexattr_t *restrict attr);
+ addToFunctionSummaryMap(
+ "pthread_mutex_init",
+ Signature(ArgTypes{Pthread_mutex_tPtrRestrictTy,
+ ConstPthread_mutexattr_tPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // int pthread_mutex_destroy(pthread_mutex_t *mutex);
+ // int pthread_mutex_lock(pthread_mutex_t *mutex);
+ // int pthread_mutex_trylock(pthread_mutex_t *mutex);
+ // int pthread_mutex_unlock(pthread_mutex_t *mutex);
+ addToFunctionSummaryMap(
+ {"pthread_mutex_destroy", "pthread_mutex_lock", "pthread_mutex_trylock",
+ "pthread_mutex_unlock"},
+ Signature(ArgTypes{Pthread_mutex_tPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
}
// Functions for testing.
if (ChecksEnabled[CK_StdCLibraryFunctionsTesterChecker]) {
addToFunctionSummaryMap(
"__two_constrained_args",
- Summary(ArgTypes{IntTy, IntTy}, RetType{IntTy}, EvalCallAsPure)
+ Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.ArgConstraint(ArgumentCondition(0U, WithinRange, SingleValue(1)))
.ArgConstraint(ArgumentCondition(1U, WithinRange, SingleValue(1))));
addToFunctionSummaryMap(
- "__arg_constrained_twice",
- Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ "__arg_constrained_twice", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(1)))
.ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(2))));
addToFunctionSummaryMap(
"__defaultparam",
- Summary(ArgTypes{Irrelevant, IntTy}, RetType{IntTy}, EvalCallAsPure)
- .ArgConstraint(NotNull(ArgNo(0))));
- addToFunctionSummaryMap("__variadic",
- Summary(ArgTypes{VoidPtrTy, ConstCharPtrTy},
- RetType{IntTy}, EvalCallAsPure)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
+ Signature(ArgTypes{Irrelevant, IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure).ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "__variadic",
+ Signature(ArgTypes{VoidPtrTy, ConstCharPtrTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
addToFunctionSummaryMap(
"__buf_size_arg_constraint",
- Summary(ArgTypes{ConstVoidPtrTy, SizeTy}, RetType{IntTy},
- EvalCallAsPure)
+ Signature(ArgTypes{ConstVoidPtrTy, SizeTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.ArgConstraint(
BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1))));
addToFunctionSummaryMap(
"__buf_size_arg_constraint_mul",
- Summary(ArgTypes{ConstVoidPtrTy, SizeTy, SizeTy}, RetType{IntTy},
- EvalCallAsPure)
+ Signature(ArgTypes{ConstVoidPtrTy, SizeTy, SizeTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1),
/*BufSizeMultiplier=*/ArgNo(2))));
+ addToFunctionSummaryMap(
+ "__buf_size_arg_constraint_concrete",
+ Signature(ArgTypes{ConstVoidPtrTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0),
+ /*BufSize=*/BVF.getValue(10, IntTy))));
+ addToFunctionSummaryMap(
+ {"__test_restrict_param_0", "__test_restrict_param_1",
+ "__test_restrict_param_2"},
+ Signature(ArgTypes{VoidPtrRestrictTy}, RetType{VoidTy}),
+ Summary(EvalCallAsPure));
}
}
@@ -1749,7 +2496,8 @@ void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker, "ModelPOSIX");
}
-bool ento::shouldRegisterStdCLibraryFunctionsChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterStdCLibraryFunctionsChecker(
+ const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index f6abbe4f8f03..6b176b3c4e2b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -204,7 +204,8 @@ class StreamChecker : public Checker<check::PreCall, eval::Call,
BugType BT_IllegalWhence{this, "Illegal whence argument",
"Stream handling error"};
BugType BT_StreamEof{this, "Stream already in EOF", "Stream handling error"};
- BugType BT_ResourceLeak{this, "Resource leak", "Stream handling error"};
+ BugType BT_ResourceLeak{this, "Resource leak", "Stream handling error",
+ /*SuppressOnSink =*/true};
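+  // With SuppressOnSink, resource leak reports are withheld on paths that
+  // end in a sink node (e.g. a call to a noreturn function such as abort()),
+  // where a leak-at-termination report would mostly be noise.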
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -337,6 +338,12 @@ private:
/// to ensure uniform handling.
void reportFEofWarning(CheckerContext &C, ProgramStateRef State) const;
+ /// Emit resource leak warnings for the given symbols.
+ /// Create a non-fatal error node for these and return it (if any warnings
+ /// were generated). The return value is never null.
+ ExplodedNode *reportLeaks(const SmallVector<SymbolRef, 2> &LeakedSyms,
+ CheckerContext &C, ExplodedNode *Pred) const;
+
/// Find the description data of the function called by a call event.
/// Returns nullptr if no function is recognized.
const FnDescription *lookupFn(const CallEvent &Call) const {
@@ -956,28 +963,14 @@ void StreamChecker::reportFEofWarning(CheckerContext &C,
C.addTransition(State);
}
-void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
-
- // TODO: Clean up the state.
- const StreamMapTy &Map = State->get<StreamMap>();
- for (const auto &I : Map) {
- SymbolRef Sym = I.first;
- const StreamState &SS = I.second;
- if (!SymReaper.isDead(Sym) || !SS.isOpened())
- continue;
-
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- continue;
-
- // Do not warn for non-closed stream at program exit.
- ExplodedNode *Pred = C.getPredecessor();
- if (Pred && Pred->getCFGBlock() &&
- Pred->getCFGBlock()->hasNoReturnElement())
- continue;
+ExplodedNode *
+StreamChecker::reportLeaks(const SmallVector<SymbolRef, 2> &LeakedSyms,
+ CheckerContext &C, ExplodedNode *Pred) const {
+ ExplodedNode *Err = C.generateNonFatalErrorNode(C.getState(), Pred);
+ if (!Err)
+ return Pred;
+ for (SymbolRef LeakSym : LeakedSyms) {
// Resource leaks can result in multiple warnings that describe the same kind
// of programming error:
// void f() {
@@ -989,8 +982,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// from different kinds of errors), the reduction in redundant reports
// makes this a worthwhile heuristic.
// FIXME: Add a checker option to turn this uniqueing feature off.
-
- const ExplodedNode *StreamOpenNode = getAcquisitionSite(N, Sym, C);
+ const ExplodedNode *StreamOpenNode = getAcquisitionSite(Err, LeakSym, C);
assert(StreamOpenNode && "Could not find place of stream opening.");
PathDiagnosticLocation LocUsedForUniqueing =
PathDiagnosticLocation::createBegin(
@@ -1000,12 +992,38 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
std::unique_ptr<PathSensitiveBugReport> R =
std::make_unique<PathSensitiveBugReport>(
BT_ResourceLeak,
- "Opened stream never closed. Potential resource leak.", N,
+ "Opened stream never closed. Potential resource leak.", Err,
LocUsedForUniqueing,
StreamOpenNode->getLocationContext()->getDecl());
- R->markInteresting(Sym);
+ R->markInteresting(LeakSym);
C.emitReport(std::move(R));
}
+
+ return Err;
+}
+
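+// checkDeadSymbols batches the leaked stream symbols, prunes them from
+// StreamMap, and reports them all from a single non-fatal error node, so the
+// map is cleaned up even when no diagnostic is emitted.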
+void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ llvm::SmallVector<SymbolRef, 2> LeakedSyms;
+
+ const StreamMapTy &Map = State->get<StreamMap>();
+ for (const auto &I : Map) {
+ SymbolRef Sym = I.first;
+ const StreamState &SS = I.second;
+ if (!SymReaper.isDead(Sym))
+ continue;
+ if (SS.isOpened())
+ LeakedSyms.push_back(Sym);
+ State = State->remove<StreamMap>(Sym);
+ }
+
+ ExplodedNode *N = C.getPredecessor();
+ if (!LeakedSyms.empty())
+ N = reportLeaks(LeakedSyms, C, N);
+
+ C.addTransition(State, N);
}
ProgramStateRef StreamChecker::checkPointerEscape(
diff --git a/clang/lib/StaticAnalyzer/Checkers/Taint.cpp b/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
index 5b46ffb656cf..71b2ab834a07 100644
--- a/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
@@ -148,7 +148,7 @@ bool taint::isTainted(ProgramStateRef State, const Stmt *S,
}
bool taint::isTainted(ProgramStateRef State, SVal V, TaintTagType Kind) {
- if (const SymExpr *Sym = V.getAsSymExpr())
+ if (SymbolRef Sym = V.getAsSymbol())
return isTainted(State, Sym, Kind);
if (const MemRegion *Reg = V.getAsRegion())
return isTainted(State, Reg, Kind);
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 3e0caaf79ca0..ebe5ad53cc30 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -11,6 +11,8 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/Type.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -54,10 +56,13 @@ public:
void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
};
-}
+} // namespace
void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
CheckerContext &Ctx) const {
+ // ObjCForCollection is a loop, but has no actual condition.
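+  // (e.g. 'for (id x in collection) { ... }' has no condition expression).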
+ if (isa<ObjCForCollectionStmt>(Condition))
+ return;
SVal X = Ctx.getSVal(Condition);
if (X.isUndef()) {
// Generate a sink node, which implicitly marks both outgoing branches as
diff --git a/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index f49ee5fa5ad3..1c589e3468c2 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -125,8 +125,8 @@ void VirtualCallChecker::checkPreCall(const CallEvent &Call,
OS << "Call to ";
if (IsPure)
OS << "pure ";
- OS << "virtual method '" << MD->getParent()->getNameAsString()
- << "::" << MD->getNameAsString() << "' during ";
+ OS << "virtual method '" << MD->getParent()->getDeclName()
+ << "::" << MD->getDeclName() << "' during ";
if (*ObState == ObjectState::CtorCalled)
OS << "construction ";
else
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
index 34c072ac2241..9c7a59971763 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
@@ -34,7 +34,9 @@ tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
}
if (auto *call = dyn_cast<CallExpr>(E)) {
if (auto *memberCall = dyn_cast<CXXMemberCallExpr>(call)) {
- if (isGetterOfRefCounted(memberCall->getMethodDecl())) {
+ Optional<bool> IsGetterOfRefCt =
+ isGetterOfRefCounted(memberCall->getMethodDecl());
+ if (IsGetterOfRefCt && *IsGetterOfRefCt) {
E = memberCall->getImplicitObjectArgument();
if (StopAtFirstRefCountedObj) {
return {E, true};
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
index 3956db933b35..97f75135bf92 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
@@ -76,8 +76,11 @@ public:
if (auto *MemberCXXRD = MemberType->getPointeeCXXRecordDecl()) {
// If we don't see the definition we just don't know.
- if (MemberCXXRD->hasDefinition() && isRefCountable(MemberCXXRD))
- reportBug(Member, MemberType, MemberCXXRD, RD);
+ if (MemberCXXRD->hasDefinition()) {
+ llvm::Optional<bool> isRCAble = isRefCountable(MemberCXXRD);
+ if (isRCAble && *isRCAble)
+ reportBug(Member, MemberType, MemberCXXRD, RD);
+ }
}
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index 168cfd511170..a198943c9433 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/Optional.h"
using llvm::Optional;
using namespace clang;
@@ -20,6 +21,7 @@ namespace {
bool hasPublicRefAndDeref(const CXXRecordDecl *R) {
assert(R);
+ assert(R->hasDefinition());
bool hasRef = false;
bool hasDeref = false;
@@ -43,25 +45,29 @@ bool hasPublicRefAndDeref(const CXXRecordDecl *R) {
namespace clang {
-const CXXRecordDecl *isRefCountable(const CXXBaseSpecifier *Base) {
+llvm::Optional<const clang::CXXRecordDecl *>
+isRefCountable(const CXXBaseSpecifier *Base) {
assert(Base);
const Type *T = Base->getType().getTypePtrOrNull();
if (!T)
- return nullptr;
+ return llvm::None;
const CXXRecordDecl *R = T->getAsCXXRecordDecl();
if (!R)
- return nullptr;
+ return llvm::None;
+ if (!R->hasDefinition())
+ return llvm::None;
return hasPublicRefAndDeref(R) ? R : nullptr;
}
-bool isRefCountable(const CXXRecordDecl *R) {
+llvm::Optional<bool> isRefCountable(const CXXRecordDecl *R) {
assert(R);
R = R->getDefinition();
- assert(R);
+ if (!R)
+ return llvm::None;
if (hasPublicRefAndDeref(R))
return true;
@@ -69,13 +75,24 @@ bool isRefCountable(const CXXRecordDecl *R) {
CXXBasePaths Paths;
Paths.setOrigin(const_cast<CXXRecordDecl *>(R));
- const auto isRefCountableBase = [](const CXXBaseSpecifier *Base,
- CXXBasePath &) {
- return clang::isRefCountable(Base);
- };
+ bool AnyInconclusiveBase = false;
+ const auto isRefCountableBase =
+ [&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ Optional<const clang::CXXRecordDecl *> IsRefCountable =
+ clang::isRefCountable(Base);
+ if (!IsRefCountable) {
+ AnyInconclusiveBase = true;
+ return false;
+ }
+ return (*IsRefCountable) != nullptr;
+ };
+
+ bool BasesResult = R->lookupInBases(isRefCountableBase, Paths,
+ /*LookupInDependent =*/true);
+ if (AnyInconclusiveBase)
+ return llvm::None;
- return R->lookupInBases(isRefCountableBase, Paths,
- /*LookupInDependent =*/true);
+ return BasesResult;
}
bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
@@ -95,12 +112,19 @@ bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
|| FunctionName == "Identifier";
}
-bool isUncounted(const CXXRecordDecl *Class) {
+llvm::Optional<bool> isUncounted(const CXXRecordDecl *Class) {
// Keep isRefCounted first as it's cheaper.
- return !isRefCounted(Class) && isRefCountable(Class);
+ if (isRefCounted(Class))
+ return false;
+
+ llvm::Optional<bool> IsRefCountable = isRefCountable(Class);
+ if (!IsRefCountable)
+ return llvm::None;
+
+ return (*IsRefCountable);
}
-bool isUncountedPtr(const Type *T) {
+llvm::Optional<bool> isUncountedPtr(const Type *T) {
assert(T);
if (T->isPointerType() || T->isReferenceType()) {
@@ -111,7 +135,7 @@ bool isUncountedPtr(const Type *T) {
return false;
}
-bool isGetterOfRefCounted(const CXXMethodDecl *M) {
+Optional<bool> isGetterOfRefCounted(const CXXMethodDecl *M) {
assert(M);
if (isa<CXXMethodDecl>(M)) {
@@ -133,9 +157,7 @@ bool isGetterOfRefCounted(const CXXMethodDecl *M) {
if (auto *maybeRefToRawOperator = dyn_cast<CXXConversionDecl>(M)) {
if (auto *targetConversionType =
maybeRefToRawOperator->getConversionType().getTypePtrOrNull()) {
- if (isUncountedPtr(targetConversionType)) {
- return true;
- }
+ return isUncountedPtr(targetConversionType);
}
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
index 83d9c0bcc13b..730a59977175 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
@@ -9,6 +9,8 @@
#ifndef LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
#define LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
+#include "llvm/ADT/APInt.h"
+
namespace clang {
class CXXBaseSpecifier;
class CXXMethodDecl;
@@ -25,30 +27,31 @@ class Type;
// Ref<T>.
/// \returns CXXRecordDecl of the base if the type is ref-countable, nullptr if
-/// not.
-const clang::CXXRecordDecl *isRefCountable(const clang::CXXBaseSpecifier *Base);
+/// not, None if inconclusive.
+llvm::Optional<const clang::CXXRecordDecl *>
+isRefCountable(const clang::CXXBaseSpecifier *Base);
-/// \returns true if \p Class is ref-countable, false if not.
-/// Asserts that \p Class IS a definition.
-bool isRefCountable(const clang::CXXRecordDecl *Class);
+/// \returns true if \p Class is ref-countable, false if not, None if
+/// inconclusive.
+llvm::Optional<bool> isRefCountable(const clang::CXXRecordDecl *Class);
/// \returns true if \p Class is ref-counted, false if not.
bool isRefCounted(const clang::CXXRecordDecl *Class);
/// \returns true if \p Class is ref-countable AND not ref-counted, false if
-/// not. Asserts that \p Class IS a definition.
-bool isUncounted(const clang::CXXRecordDecl *Class);
+/// not, None if inconclusive.
+llvm::Optional<bool> isUncounted(const clang::CXXRecordDecl *Class);
/// \returns true if \p T is either a raw pointer or reference to an uncounted
-/// class, false if not.
-bool isUncountedPtr(const clang::Type *T);
+/// class, false if not, None if inconclusive.
+llvm::Optional<bool> isUncountedPtr(const clang::Type *T);
/// \returns true if \p F creates a ref-countable object from an uncounted
/// parameter, false if not.
bool isCtorOfRefCounted(const clang::FunctionDecl *F);
/// \returns true if \p M is a getter of a ref-counted class, false if not,
/// None if inconclusive.
-bool isGetterOfRefCounted(const clang::CXXMethodDecl *Method);
+llvm::Optional<bool> isGetterOfRefCounted(const clang::CXXMethodDecl *Method);
/// \returns true if \p F is a conversion between ref-countable or ref-counted
/// pointer types.
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
index 81ce284c2dc7..fa9ece217cc0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -76,19 +76,15 @@ public:
(AccSpec == AS_none && RD->isClass()))
return false;
- llvm::Optional<const clang::CXXRecordDecl *> MaybeRefCntblBaseRD =
+ llvm::Optional<const CXXRecordDecl *> RefCntblBaseRD =
isRefCountable(Base);
- if (!MaybeRefCntblBaseRD.hasValue())
+ if (!RefCntblBaseRD || !(*RefCntblBaseRD))
return false;
- const CXXRecordDecl *RefCntblBaseRD = MaybeRefCntblBaseRD.getValue();
- if (!RefCntblBaseRD)
- return false;
-
- const auto *Dtor = RefCntblBaseRD->getDestructor();
+ const auto *Dtor = (*RefCntblBaseRD)->getDestructor();
if (!Dtor || !Dtor->isVirtual()) {
ProblematicBaseSpecifier = Base;
- ProblematicBaseClass = RefCntblBaseRD;
+ ProblematicBaseClass = *RefCntblBaseRD;
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
index 940a1f349831..d70bd9489d2c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
@@ -86,7 +86,8 @@ public:
continue; // FIXME? Should we bail?
// FIXME: more complex types (arrays, references to raw pointers, etc)
- if (!isUncountedPtr(ArgType))
+ Optional<bool> IsUncounted = isUncountedPtr(ArgType);
+ if (!IsUncounted || !(*IsUncounted))
continue;
const auto *Arg = CE->getArg(ArgIdx);
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
new file mode 100644
index 000000000000..deebbd603b2c
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
@@ -0,0 +1,107 @@
+//=======- UncountedLambdaCapturesChecker.cpp --------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UncountedLambdaCapturesChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+private:
+ BugType Bug{this, "Lambda capture of uncounted variable",
+ "WebKit coding guidelines"};
+ mutable BugReporter *BR;
+
+public:
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const UncountedLambdaCapturesChecker *Checker;
+ explicit LocalVisitor(const UncountedLambdaCapturesChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitLambdaExpr(LambdaExpr *L) {
+ Checker->visitLambdaExpr(L);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitLambdaExpr(LambdaExpr *L) const {
+ for (const LambdaCapture &C : L->captures()) {
+ if (C.capturesVariable()) {
+ VarDecl *CapturedVar = C.getCapturedVar();
+ if (auto *CapturedVarType = CapturedVar->getType().getTypePtrOrNull()) {
+ Optional<bool> IsUncountedPtr = isUncountedPtr(CapturedVarType);
+ if (IsUncountedPtr && *IsUncountedPtr) {
+ reportBug(C, CapturedVar, CapturedVarType);
+ }
+ }
+ }
+ }
+ }
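+
+  // A sketch of what gets flagged (names illustrative):
+  //   RefCountable *raw = provider();
+  //   auto task = [raw]() { raw->use(); }; // unsafe capture of uncounted ptr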
+
+ void reportBug(const LambdaCapture &Capture, VarDecl *CapturedVar,
+ const Type *T) const {
+ assert(CapturedVar);
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ if (Capture.isExplicit()) {
+ Os << "Captured ";
+ } else {
+ Os << "Implicitly captured ";
+ }
+ if (T->isPointerType()) {
+ Os << "raw-pointer ";
+ } else {
+ assert(T->isReferenceType());
+ Os << "reference ";
+ }
+
+ printQuotedQualifiedName(Os, Capture.getCapturedVar());
+ Os << " to uncounted type is unsafe.";
+
+ PathDiagnosticLocation BSLoc(Capture.getLocation(), BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerUncountedLambdaCapturesChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<UncountedLambdaCapturesChecker>();
+}
+
+bool ento::shouldRegisterUncountedLambdaCapturesChecker(
+ const CheckerManager &mgr) {
+ return true;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
new file mode 100644
index 000000000000..7e86f28cb70f
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
@@ -0,0 +1,251 @@
+//=======- UncountedLocalVarsChecker.cpp -------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTUtils.h"
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// for ( int a = ...) ... true
+// for ( int a : ...) ... true
+// if ( int *a = ...) ... true
+// anything else ... false
+bool isDeclaredInForOrIf(const VarDecl *Var) {
+ assert(Var);
+ auto &ASTCtx = Var->getASTContext();
+ auto parent = ASTCtx.getParents(*Var);
+
+ if (parent.size() == 1) {
+ if (auto *DS = parent.begin()->get<DeclStmt>()) {
+ DynTypedNodeList grandParent = ASTCtx.getParents(*DS);
+ if (grandParent.size() == 1) {
+ return grandParent.begin()->get<ForStmt>() ||
+ grandParent.begin()->get<IfStmt>() ||
+ grandParent.begin()->get<CXXForRangeStmt>();
+ }
+ }
+ }
+ return false;
+}
+
+// FIXME: should be defined by annotations in the future
+bool isRefcountedStringsHack(const VarDecl *V) {
+ assert(V);
+ auto safeClass = [](const std::string &className) {
+ return className == "String" || className == "AtomString" ||
+ className == "UniquedString" || className == "Identifier";
+ };
+ QualType QT = V->getType();
+ auto *T = QT.getTypePtr();
+ if (auto *CXXRD = T->getAsCXXRecordDecl()) {
+ if (safeClass(safeGetName(CXXRD)))
+ return true;
+ }
+ if (T->isPointerType() || T->isReferenceType()) {
+ if (auto *CXXRD = T->getPointeeCXXRecordDecl()) {
+ if (safeClass(safeGetName(CXXRD)))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool isGuardedScopeEmbeddedInGuardianScope(const VarDecl *Guarded,
+ const VarDecl *MaybeGuardian) {
+ assert(Guarded);
+ assert(MaybeGuardian);
+
+ if (!MaybeGuardian->isLocalVarDecl())
+ return false;
+
+ const CompoundStmt *guardiansClosestCompStmtAncestor = nullptr;
+
+ ASTContext &ctx = MaybeGuardian->getASTContext();
+
+ for (DynTypedNodeList guardianAncestors = ctx.getParents(*MaybeGuardian);
+ !guardianAncestors.empty();
+ guardianAncestors = ctx.getParents(
+ *guardianAncestors
+ .begin()) // FIXME - should we handle all of the parents?
+ ) {
+ for (auto &guardianAncestor : guardianAncestors) {
+ if (auto *CStmtParentAncestor = guardianAncestor.get<CompoundStmt>()) {
+ guardiansClosestCompStmtAncestor = CStmtParentAncestor;
+ break;
+ }
+ }
+ if (guardiansClosestCompStmtAncestor)
+ break;
+ }
+
+ if (!guardiansClosestCompStmtAncestor)
+ return false;
+
+ // We need to skip the first CompoundStmt to avoid the situation where the
+ // guardian is defined in the same scope as the guarded variable.
+ bool HaveSkippedFirstCompoundStmt = false;
+ for (DynTypedNodeList guardedVarAncestors = ctx.getParents(*Guarded);
+ !guardedVarAncestors.empty();
+ guardedVarAncestors = ctx.getParents(
+ *guardedVarAncestors
+ .begin()) // FIXME - should we handle all of the parents?
+ ) {
+ for (auto &guardedVarAncestor : guardedVarAncestors) {
+ if (auto *CStmtAncestor = guardedVarAncestor.get<CompoundStmt>()) {
+ if (!HaveSkippedFirstCompoundStmt) {
+ HaveSkippedFirstCompoundStmt = true;
+ continue;
+ }
+ if (CStmtAncestor == guardiansClosestCompStmtAncestor)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
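+
+// A sketch of the guardian/guarded pattern recognized above (names
+// illustrative):
+//   RefPtr<Obj> guardian = makeObj();  // ref-counted local variable
+//   {
+//     Obj *guarded = guardian.get();   // nested scope: provably backed
+//   }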
+
+class UncountedLocalVarsChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+ BugType Bug{this,
+ "Uncounted raw pointer or reference not provably backed by "
+ "ref-counted variable",
+ "WebKit coding guidelines"};
+ mutable BugReporter *BR;
+
+public:
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const UncountedLocalVarsChecker *Checker;
+ explicit LocalVisitor(const UncountedLocalVarsChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitVarDecl(VarDecl *V) {
+ Checker->visitVarDecl(V);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitVarDecl(const VarDecl *V) const {
+ if (shouldSkipVarDecl(V))
+ return;
+
+ const auto *ArgType = V->getType().getTypePtr();
+ if (!ArgType)
+ return;
+
+ Optional<bool> IsUncountedPtr = isUncountedPtr(ArgType);
+ if (IsUncountedPtr && *IsUncountedPtr) {
+ const Expr *const InitExpr = V->getInit();
+ if (!InitExpr)
+ return; // FIXME: later on we might warn on uninitialized vars too
+
+ const clang::Expr *const InitArgOrigin =
+ tryToFindPtrOrigin(InitExpr, /*StopAtFirstRefCountedObj=*/false)
+ .first;
+ if (!InitArgOrigin)
+ return;
+
+ if (isa<CXXThisExpr>(InitArgOrigin))
+ return;
+
+ if (auto *Ref = llvm::dyn_cast<DeclRefExpr>(InitArgOrigin)) {
+ if (auto *MaybeGuardian =
+ dyn_cast_or_null<VarDecl>(Ref->getFoundDecl())) {
+ const auto *MaybeGuardianArgType =
+ MaybeGuardian->getType().getTypePtr();
+ if (!MaybeGuardianArgType)
+ return;
+ const CXXRecordDecl *const MaybeGuardianArgCXXRecord =
+ MaybeGuardianArgType->getAsCXXRecordDecl();
+ if (!MaybeGuardianArgCXXRecord)
+ return;
+
+ if (MaybeGuardian->isLocalVarDecl() &&
+ (isRefCounted(MaybeGuardianArgCXXRecord) ||
+ isRefcountedStringsHack(MaybeGuardian)) &&
+ isGuardedScopeEmbeddedInGuardianScope(V, MaybeGuardian)) {
+ return;
+ }
+
+ // Parameters are guaranteed to be safe for the duration of the call
+ // by another checker.
+ if (isa<ParmVarDecl>(MaybeGuardian))
+ return;
+ }
+ }
+
+ reportBug(V);
+ }
+ }
+
+ bool shouldSkipVarDecl(const VarDecl *V) const {
+ assert(V);
+ if (!V->isLocalVarDecl())
+ return true;
+
+ if (isDeclaredInForOrIf(V))
+ return true;
+
+ return false;
+ }
+
+ void reportBug(const VarDecl *V) const {
+ assert(V);
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ Os << "Local variable ";
+ printQuotedQualifiedName(Os, V);
+ Os << " is uncounted and unsafe.";
+
+ PathDiagnosticLocation BSLoc(V->getLocation(), BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(V->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerUncountedLocalVarsChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<UncountedLocalVarsChecker>();
+}
+
+bool ento::shouldRegisterUncountedLocalVarsChecker(const CheckerManager &) {
+ return true;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/Yaml.h b/clang/lib/StaticAnalyzer/Checkers/Yaml.h
index ec612dde3b8b..ec612dde3b8b 100755..100644
--- a/clang/lib/StaticAnalyzer/Checkers/Yaml.h
+++ b/clang/lib/StaticAnalyzer/Checkers/Yaml.h
diff --git a/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 01ac2bc83bb6..8cd7f75e4e38 100644
--- a/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -40,7 +40,7 @@ void AnalyzerOptions::printFormattedEntry(
const size_t PadForDesc = InitialPad + EntryWidth;
FOut.PadToColumn(InitialPad) << EntryDescPair.first;
- // If the buffer's length is greater then PadForDesc, print a newline.
+ // If the buffer's length is greater than PadForDesc, print a newline.
if (FOut.getColumn() > PadForDesc)
FOut << '\n';
diff --git a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 73f057f09550..d1f5ac02278f 100644
--- a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -42,7 +42,7 @@ void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID,
}
void PointerToMemberData::Profile(
- llvm::FoldingSetNodeID& ID, const DeclaratorDecl *D,
+ llvm::FoldingSetNodeID &ID, const NamedDecl *D,
llvm::ImmutableList<const CXXBaseSpecifier *> L) {
ID.AddPointer(D);
ID.AddPointer(L.getInternalPointer());
@@ -159,17 +159,17 @@ BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
}
const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
- const DeclaratorDecl *DD, llvm::ImmutableList<const CXXBaseSpecifier *> L) {
+ const NamedDecl *ND, llvm::ImmutableList<const CXXBaseSpecifier *> L) {
llvm::FoldingSetNodeID ID;
- PointerToMemberData::Profile(ID, DD, L);
+ PointerToMemberData::Profile(ID, ND, L);
void *InsertPos;
PointerToMemberData *D =
PointerToMemberDataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!D) {
- D = (PointerToMemberData*) BPAlloc.Allocate<PointerToMemberData>();
- new (D) PointerToMemberData(DD, L);
+ D = (PointerToMemberData *)BPAlloc.Allocate<PointerToMemberData>();
+ new (D) PointerToMemberData(ND, L);
PointerToMemberDataSet.InsertNode(D, InsertPos);
}
@@ -180,25 +180,24 @@ const PointerToMemberData *BasicValueFactory::accumCXXBase(
llvm::iterator_range<CastExpr::path_const_iterator> PathRange,
const nonloc::PointerToMember &PTM) {
nonloc::PointerToMember::PTMDataType PTMDT = PTM.getPTMData();
- const DeclaratorDecl *DD = nullptr;
+ const NamedDecl *ND = nullptr;
llvm::ImmutableList<const CXXBaseSpecifier *> PathList;
- if (PTMDT.isNull() || PTMDT.is<const DeclaratorDecl *>()) {
- if (PTMDT.is<const DeclaratorDecl *>())
- DD = PTMDT.get<const DeclaratorDecl *>();
+ if (PTMDT.isNull() || PTMDT.is<const NamedDecl *>()) {
+ if (PTMDT.is<const NamedDecl *>())
+ ND = PTMDT.get<const NamedDecl *>();
PathList = CXXBaseListFactory.getEmptyList();
} else { // const PointerToMemberData *
- const PointerToMemberData *PTMD =
- PTMDT.get<const PointerToMemberData *>();
- DD = PTMD->getDeclaratorDecl();
+ const PointerToMemberData *PTMD = PTMDT.get<const PointerToMemberData *>();
+ ND = PTMD->getDeclaratorDecl();
PathList = PTMD->getCXXBaseList();
}
for (const auto &I : llvm::reverse(PathRange))
PathList = prependCXXBase(I, PathList);
- return getPointerToMemberData(DD, PathList);
+ return getPointerToMemberData(ND, PathList);
}
const llvm::APSInt*
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index 72be4e81c83d..bf38891b370a 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -1570,9 +1570,8 @@ static Optional<size_t> getLengthOnSingleLine(const SourceManager &SM,
if (FID != SM.getFileID(ExpansionRange.getEnd()))
return None;
- bool Invalid;
- const llvm::MemoryBuffer *Buffer = SM.getBuffer(FID, &Invalid);
- if (Invalid)
+ Optional<MemoryBufferRef> Buffer = SM.getBufferOrNone(FID);
+ if (!Buffer)
return None;
unsigned BeginOffset = SM.getFileOffset(ExpansionRange.getBegin());
@@ -2194,8 +2193,8 @@ void BasicBugReport::Profile(llvm::FoldingSetNodeID& hash) const {
for (SourceRange range : Ranges) {
if (!range.isValid())
continue;
- hash.AddInteger(range.getBegin().getRawEncoding());
- hash.AddInteger(range.getEnd().getRawEncoding());
+ hash.Add(range.getBegin());
+ hash.Add(range.getEnd());
}
}
@@ -2217,8 +2216,8 @@ void PathSensitiveBugReport::Profile(llvm::FoldingSetNodeID &hash) const {
for (SourceRange range : Ranges) {
if (!range.isValid())
continue;
- hash.AddInteger(range.getBegin().getRawEncoding());
- hash.AddInteger(range.getEnd().getRawEncoding());
+ hash.Add(range.getBegin());
+ hash.Add(range.getEnd());
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index ef4d38ff498f..bc72f4f8c1e3 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -2813,7 +2813,7 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
//===----------------------------------------------------------------------===//
FalsePositiveRefutationBRVisitor::FalsePositiveRefutationBRVisitor()
- : Constraints(ConstraintRangeTy::Factory().getEmptyMap()) {}
+ : Constraints(ConstraintMap::Factory().getEmptyMap()) {}
void FalsePositiveRefutationBRVisitor::finalizeVisitor(
BugReporterContext &BRC, const ExplodedNode *EndPathNode,
@@ -2855,9 +2855,8 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
void FalsePositiveRefutationBRVisitor::addConstraints(
const ExplodedNode *N, bool OverwriteConstraintsOnExistingSyms) {
// Collect new constraints
- const ConstraintRangeTy &NewCs = N->getState()->get<ConstraintRange>();
- ConstraintRangeTy::Factory &CF =
- N->getState()->get_context<ConstraintRange>();
+ ConstraintMap NewCs = getConstraintMap(N->getState());
+ ConstraintMap::Factory &CF = N->getState()->get_context<ConstraintMap>();
// Add constraints if we don't have them yet
for (auto const &C : NewCs) {
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 78d13ddfb773..a55d9302ca58 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -687,7 +687,7 @@ void CXXInstanceCall::getExtraInvalidatedValues(
// base class decl, rather than the class of the instance which needs to be
// checked for mutable fields.
// TODO: We might as well look at the dynamic type of the object.
- const Expr *Ex = getCXXThisExpr()->ignoreParenBaseCasts();
+ const Expr *Ex = getCXXThisExpr()->IgnoreParenBaseCasts();
QualType T = Ex->getType();
if (T->isPointerType()) // Arrow or implicit-this syntax?
T = T->getPointeeType();
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 725ff1002e29..3d44d2cbc069 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -93,7 +93,7 @@ StringRef CheckerContext::getMacroNameOrSpelling(SourceLocation &Loc) {
if (Loc.isMacroID())
return Lexer::getImmediateMacroName(Loc, getSourceManager(),
getLangOpts());
- SmallVector<char, 16> buf;
+ SmallString<16> buf;
return Lexer::getSpelling(Loc, buf, getSourceManager(), getLangOpts());
}
diff --git a/clang/lib/StaticAnalyzer/Core/DynamicType.cpp b/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
index e9b64fd79614..9ed915aafcab 100644
--- a/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
+++ b/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
@@ -65,6 +65,13 @@ const DynamicTypeInfo *getRawDynamicTypeInfo(ProgramStateRef State,
return State->get<DynamicTypeMap>(MR);
}
+static void unbox(QualType &Ty) {
+ // FIXME: Why are we being fed references to pointers in the first place?
+ while (Ty->isReferenceType() || Ty->isPointerType())
+ Ty = Ty->getPointeeType();
+ Ty = Ty.getCanonicalType().getUnqualifiedType();
+}
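+// For example, 'const Shape *&' unboxes to 'Shape': reference and pointer
+// layers are stripped, then qualifiers are dropped from the canonical type.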
+
const DynamicCastInfo *getDynamicCastInfo(ProgramStateRef State,
const MemRegion *MR,
QualType CastFromTy,
@@ -73,6 +80,9 @@ const DynamicCastInfo *getDynamicCastInfo(ProgramStateRef State,
if (!Lookup)
return nullptr;
+ unbox(CastFromTy);
+ unbox(CastToTy);
+
for (const DynamicCastInfo &Cast : *Lookup)
if (Cast.equals(CastFromTy, CastToTy))
return &Cast;
@@ -112,6 +122,9 @@ ProgramStateRef setDynamicTypeAndCastInfo(ProgramStateRef State,
State = State->set<DynamicTypeMap>(MR, CastToTy);
}
+ unbox(CastFromTy);
+ unbox(CastToTy);
+
DynamicCastInfo::CastResult ResultKind =
CastSucceeds ? DynamicCastInfo::CastResult::Success
: DynamicCastInfo::CastResult::Failure;
diff --git a/clang/lib/StaticAnalyzer/Core/Environment.cpp b/clang/lib/StaticAnalyzer/Core/Environment.cpp
index 9e6d79bb7dcc..ee7474592528 100644
--- a/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
@@ -85,6 +86,12 @@ SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
SVal Environment::getSVal(const EnvironmentEntry &Entry,
SValBuilder& svalBuilder) const {
const Stmt *S = Entry.getStmt();
+ assert(!isa<ObjCForCollectionStmt>(S) &&
+ "Use ExprEngine::hasMoreIteration()!");
+ assert((isa<Expr>(S) || isa<ReturnStmt>(S)) &&
+ "Environment can only argue about Exprs, since only they express "
+ "a value! Any non-expression statement stored in Environment is a "
+ "result of a hack!");
const LocationContext *LCtx = Entry.getLocationContext();
switch (S->getStmtClass()) {
@@ -109,6 +116,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::StringLiteralClass:
case Stmt::TypeTraitExprClass:
case Stmt::SizeOfPackExprClass:
+ case Stmt::PredefinedExprClass:
// Known constants; defer to SValBuilder.
return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
@@ -183,18 +191,15 @@ EnvironmentManager::removeDeadBindings(Environment Env,
F.getTreeFactory());
// Iterate over the block-expr bindings.
- for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
+ for (Environment::iterator I = Env.begin(), End = Env.end(); I != End; ++I) {
const EnvironmentEntry &BlkExpr = I.getKey();
const SVal &X = I.getData();
- const bool IsBlkExprLive =
- SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext());
+ const Expr *E = dyn_cast<Expr>(BlkExpr.getStmt());
+ if (!E)
+ continue;
- assert((isa<Expr>(BlkExpr.getStmt()) || !IsBlkExprLive) &&
- "Only Exprs can be live, LivenessAnalysis argues about the liveness "
- "of *values*!");
-
- if (IsBlkExprLive) {
+ if (SymReaper.isLive(E, BlkExpr.getLocationContext())) {
// Copy the binding to the new map.
EBMapRef = EBMapRef.add(BlkExpr, X);
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 265dcd134213..f285b652c175 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -169,7 +169,7 @@ public:
if (S) {
S->printJson(Out, Helper, PP, /*AddQuotes=*/true);
} else {
- Out << '\"' << I->getAnyMember()->getNameAsString() << '\"';
+ Out << '\"' << I->getAnyMember()->getDeclName() << '\"';
}
}
@@ -2129,6 +2129,83 @@ static const Stmt *ResolveCondition(const Stmt *Condition,
llvm_unreachable("could not resolve condition");
}
+using ObjCForLctxPair =
+ std::pair<const ObjCForCollectionStmt *, const LocationContext *>;
+
+REGISTER_MAP_WITH_PROGRAMSTATE(ObjCForHasMoreIterations, ObjCForLctxPair, bool)
+
+ProgramStateRef ExprEngine::setWhetherHasMoreIteration(
+ ProgramStateRef State, const ObjCForCollectionStmt *O,
+    const LocationContext *LC, bool HasMoreIteration) {
+  assert(!State->contains<ObjCForHasMoreIterations>({O, LC}));
+  return State->set<ObjCForHasMoreIterations>({O, LC}, HasMoreIteration);
+}
+
+ProgramStateRef
+ExprEngine::removeIterationState(ProgramStateRef State,
+ const ObjCForCollectionStmt *O,
+ const LocationContext *LC) {
+ assert(State->contains<ObjCForHasMoreIterations>({O, LC}));
+ return State->remove<ObjCForHasMoreIterations>({O, LC});
+}
+
+bool ExprEngine::hasMoreIteration(ProgramStateRef State,
+ const ObjCForCollectionStmt *O,
+ const LocationContext *LC) {
+ assert(State->contains<ObjCForHasMoreIterations>({O, LC}));
+ return *State->get<ObjCForHasMoreIterations>({O, LC});
+}
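The three helpers above implement a small hand-off protocol between the Objective-C collection-loop modeling and branch processing. A sketch of the intended call sequence (State, S, and LCtx are assumed to come from the surrounding engine code):

    // Producer, while modeling an ObjCForCollectionStmt:
    State = ExprEngine::setWhetherHasMoreIteration(State, S, LCtx, true);
    // Consumer, when the loop condition is branched on:
    bool More = ExprEngine::hasMoreIteration(State, S, LCtx);
    State = ExprEngine::removeIterationState(State, S, LCtx); // one-shot use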
+
+/// Split the state on whether there are any more iterations left for this loop.
+/// Returns a (HasMoreIteration, HasNoMoreIteration) pair, or None when the
+/// acquisition of the loop condition value failed.
+static Optional<std::pair<ProgramStateRef, ProgramStateRef>>
+assumeCondition(const Stmt *Condition, ExplodedNode *N) {
+ ProgramStateRef State = N->getState();
+ if (const auto *ObjCFor = dyn_cast<ObjCForCollectionStmt>(Condition)) {
+    bool HasMoreIteration =
+        ExprEngine::hasMoreIteration(State, ObjCFor, N->getLocationContext());
+    // Checkers have already run on branch conditions, so the current
+    // information as to whether the loop has more iterations becomes
+    // outdated after this point.
+    State = ExprEngine::removeIterationState(State, ObjCFor,
+                                             N->getLocationContext());
+    if (HasMoreIteration)
+ return std::pair<ProgramStateRef, ProgramStateRef>{State, nullptr};
+ else
+ return std::pair<ProgramStateRef, ProgramStateRef>{nullptr, State};
+ }
+ SVal X = State->getSVal(Condition, N->getLocationContext());
+
+ if (X.isUnknownOrUndef()) {
+ // Give it a chance to recover from unknown.
+ if (const auto *Ex = dyn_cast<Expr>(Condition)) {
+ if (Ex->getType()->isIntegralOrEnumerationType()) {
+        // Try to recover some path-sensitivity. Right now, casts of symbolic
+        // integers that promote their values are not tracked well.
+ // If 'Condition' is such an expression, try and recover the
+ // underlying value and use that instead.
+ SVal recovered =
+ RecoverCastedSymbol(State, Condition, N->getLocationContext(),
+ N->getState()->getStateManager().getContext());
+
+ if (!recovered.isUnknown()) {
+ X = recovered;
+ }
+ }
+ }
+ }
+
+ // If the condition is still unknown, give up.
+ if (X.isUnknownOrUndef())
+ return None;
+
+ DefinedSVal V = X.castAs<DefinedSVal>();
+
+ return State->assume(V);
+}
+
void ExprEngine::processBranch(const Stmt *Condition,
NodeBuilderContext& BldCtx,
ExplodedNode *Pred,
@@ -2165,48 +2242,28 @@ void ExprEngine::processBranch(const Stmt *Condition,
return;
BranchNodeBuilder builder(CheckersOutSet, Dst, BldCtx, DstT, DstF);
- for (const auto PredI : CheckersOutSet) {
- if (PredI->isSink())
+ for (ExplodedNode *PredN : CheckersOutSet) {
+ if (PredN->isSink())
continue;
- ProgramStateRef PrevState = PredI->getState();
- SVal X = PrevState->getSVal(Condition, PredI->getLocationContext());
-
- if (X.isUnknownOrUndef()) {
- // Give it a chance to recover from unknown.
- if (const auto *Ex = dyn_cast<Expr>(Condition)) {
- if (Ex->getType()->isIntegralOrEnumerationType()) {
- // Try to recover some path-sensitivity. Right now casts of symbolic
- // integers that promote their values are currently not tracked well.
- // If 'Condition' is such an expression, try and recover the
- // underlying value and use that instead.
- SVal recovered = RecoverCastedSymbol(PrevState, Condition,
- PredI->getLocationContext(),
- getContext());
-
- if (!recovered.isUnknown()) {
- X = recovered;
- }
- }
- }
- }
+ ProgramStateRef PrevState = PredN->getState();
- // If the condition is still unknown, give up.
- if (X.isUnknownOrUndef()) {
- builder.generateNode(PrevState, true, PredI);
- builder.generateNode(PrevState, false, PredI);
+ ProgramStateRef StTrue, StFalse;
+ if (const auto KnownCondValueAssumption = assumeCondition(Condition, PredN))
+ std::tie(StTrue, StFalse) = *KnownCondValueAssumption;
+ else {
+ assert(!isa<ObjCForCollectionStmt>(Condition));
+ builder.generateNode(PrevState, true, PredN);
+ builder.generateNode(PrevState, false, PredN);
continue;
}
-
- DefinedSVal V = X.castAs<DefinedSVal>();
-
- ProgramStateRef StTrue, StFalse;
- std::tie(StTrue, StFalse) = PrevState->assume(V);
+ if (StTrue && StFalse)
+      assert(!isa<ObjCForCollectionStmt>(Condition));
// Process the true branch.
if (builder.isFeasible(true)) {
if (StTrue)
- builder.generateNode(StTrue, true, PredI);
+ builder.generateNode(StTrue, true, PredN);
else
builder.markInfeasible(true);
}
@@ -2214,7 +2271,7 @@ void ExprEngine::processBranch(const Stmt *Condition,
// Process the false branch.
if (builder.isFeasible(false)) {
if (StFalse)
- builder.generateNode(StFalse, false, PredI);
+ builder.generateNode(StFalse, false, PredN);
else
builder.markInfeasible(false);
}
@@ -2530,16 +2587,8 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
return;
}
if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) {
- // FIXME: Compute lvalue of field pointers-to-member.
- // Right now we just use a non-null void pointer, so that it gives proper
- // results in boolean contexts.
- // FIXME: Maybe delegate this to the surrounding operator&.
- // Note how this expression is lvalue, however pointer-to-member is NonLoc.
- SVal V = svalBuilder.conjureSymbolVal(Ex, LCtx, getContext().VoidPtrTy,
- currBldrCtx->blockCount());
- state = state->assume(V.castAs<DefinedOrUnknownSVal>(), true);
- Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
- ProgramPoint::PostLValueKind);
+    // Delegate all work related to pointers-to-members to the surrounding
+ // operator&.
return;
}
if (isa<BindingDecl>(D)) {
@@ -3100,7 +3149,7 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
if (Stop(N))
return true;
- if (N->succ_size() != 1 || !isNodeHidden(N->getFirstSucc()))
+ if (N->succ_size() != 1 || !isNodeHidden(N->getFirstSucc(), nullptr))
break;
PostCallback(N);
@@ -3109,7 +3158,7 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
return false;
}
- static bool isNodeHidden(const ExplodedNode *N) {
+ static bool isNodeHidden(const ExplodedNode *N, const ExplodedGraph *G) {
return N->isTrivial();
}
@@ -3162,8 +3211,9 @@ void ExprEngine::ViewGraph(bool trim) {
#ifndef NDEBUG
std::string Filename = DumpGraph(trim);
llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
-#endif
+#else
llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
+#endif
}
@@ -3171,8 +3221,9 @@ void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
#ifndef NDEBUG
std::string Filename = DumpGraph(Nodes);
llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
-#endif
+#else
llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
+#endif
}
std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
@@ -3209,15 +3260,17 @@ std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
if (!TrimmedG.get()) {
llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
+ return "";
} else {
return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
/*ShortNames=*/false,
/*Title=*/"Trimmed Exploded Graph",
/*Filename=*/std::string(Filename));
}
-#endif
+#else
llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
return "";
+#endif
}
void *ProgramStateTrait<ReplayWithoutInlining>::GDMIndex() {
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index c5e38cc7423d..18d1b2169eed 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -418,6 +418,8 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
case CK_LValueBitCast:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
@@ -991,10 +993,11 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex)) {
const ValueDecl *VD = DRE->getDecl();
- if (isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD)) {
+ if (isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
+ isa<IndirectFieldDecl>(VD)) {
ProgramStateRef State = (*I)->getState();
const LocationContext *LCtx = (*I)->getLocationContext();
- SVal SV = svalBuilder.getMemberPointer(cast<DeclaratorDecl>(VD));
+ SVal SV = svalBuilder.getMemberPointer(cast<NamedDecl>(VD));
Bldr.generateNode(U, *I, State->BindExpr(U, LCtx, SV));
break;
}
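For reference, the kind of source construct that now reaches getMemberPointer() through an IndirectFieldDecl (an illustrative example, not taken from the patch's tests):

    struct S {
      union { int i; float f; }; // 'i' and 'f' are IndirectFieldDecls in S
    };
    int S::*p = &S::i;           // unary '&' over a DeclRefExpr to 'i'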
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 38a680eb04c0..cab65687444b 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -132,10 +132,20 @@ SVal ExprEngine::computeObjectUnderConstruction(
case ConstructionContext::SimpleConstructorInitializerKind: {
const auto *ICC = cast<ConstructorInitializerConstructionContext>(CC);
const auto *Init = ICC->getCXXCtorInitializer();
- assert(Init->isAnyMemberInitializer());
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr = SVB.getCXXThis(CurCtor, LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
+ if (Init->isBaseInitializer()) {
+ const auto *ThisReg = cast<SubRegion>(ThisVal.getAsRegion());
+ const CXXRecordDecl *BaseClass =
+ Init->getBaseClass()->getAsCXXRecordDecl();
+ const auto *BaseReg =
+ MRMgr.getCXXBaseObjectRegion(BaseClass, ThisReg,
+ Init->isBaseVirtual());
+ return SVB.makeLoc(BaseReg);
+ }
+ if (Init->isDelegatingInitializer())
+ return ThisVal;
const ValueDecl *Field;
SVal FieldVal;
@@ -364,8 +374,12 @@ ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind:
case ConstructionContext::SimpleConstructorInitializerKind: {
const auto *ICC = cast<ConstructorInitializerConstructionContext>(CC);
- return addObjectUnderConstruction(State, ICC->getCXXCtorInitializer(),
- LCtx, V);
+ const auto *Init = ICC->getCXXCtorInitializer();
+    // Base and delegating initializers were handled above.
+    assert(Init->isAnyMemberInitializer() &&
+           "Base and delegating initializers should have been handled by "
+           "computeObjectUnderConstruction()");
+ return addObjectUnderConstruction(State, Init, LCtx, V);
}
case ConstructionContext::NewAllocatedObjectKind: {
return State;
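A minimal source-level example of the two initializer kinds newly handled by computeObjectUnderConstruction() (illustrative only):

    struct Base { Base(int); };
    struct Derived : Base {
      Derived(int i) : Base(i) {} // base initializer -> CXXBaseObjectRegion
      Derived() : Derived(42) {}  // delegating initializer -> ThisVal itself
    };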
@@ -602,11 +616,11 @@ void ExprEngine::handleConstructor(const Expr *E,
*Call, *this);
ExplodedNodeSet DstEvaluated;
- StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
if (CE && CE->getConstructor()->isTrivial() &&
CE->getConstructor()->isCopyOrMoveConstructor() &&
!CallOpts.IsArrayCtorOrDtor) {
+ StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
// FIXME: Handle other kinds of trivial constructors as well.
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
@@ -626,6 +640,8 @@ void ExprEngine::handleConstructor(const Expr *E,
// in the CFG, would be called at the end of the full expression or
// later (for life-time extended temporaries) -- but avoids infeasible
// paths when no-return temporary destructors are used for assertions.
+ ExplodedNodeSet DstEvaluatedPostProcessed;
+ StmtNodeBuilder Bldr(DstEvaluated, DstEvaluatedPostProcessed, *currBldrCtx);
const AnalysisDeclContext *ADC = LCtx->getAnalysisDeclContext();
if (!ADC->getCFGBuildOptions().AddTemporaryDtors) {
if (llvm::isa_and_nonnull<CXXTempObjectRegion>(TargetRegion) &&
@@ -655,7 +671,7 @@ void ExprEngine::handleConstructor(const Expr *E,
}
ExplodedNodeSet DstPostArgumentCleanup;
- for (ExplodedNode *I : DstEvaluated)
+ for (ExplodedNode *I : DstEvaluatedPostProcessed)
finishArgumentConstruction(DstPostArgumentCleanup, I, *Call);
// If there were other constructors called for object-type arguments
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 52ba17d59ae0..996d3644e018 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -842,19 +842,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
StringRef Name) {
const IdentifierInfo &II = Ctx.Idents.get(Name);
- DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
- if (!RD->lookup(DeclName).empty())
- return true;
-
- CXXBasePaths Paths(false, false, false);
- if (RD->lookupInBases(
- [DeclName](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, DeclName);
- },
- Paths))
- return true;
-
- return false;
+ return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
}
/// Returns true if the given C++ class is a container or iterator.
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index eb9a0be2e5d6..5a55e81497b0 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -53,10 +53,8 @@ static void populateObjCForDestinationSet(
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
- SVal hasElementsV = svalBuilder.makeTruthVal(hasElements);
-
- // FIXME: S is not an expression. We should not be binding values to it.
- ProgramStateRef nextState = state->BindExpr(S, LCtx, hasElementsV);
+ ProgramStateRef nextState =
+ ExprEngine::setWhetherHasMoreIteration(state, S, LCtx, hasElements);
if (auto MV = elementV.getAs<loc::MemRegionVal>())
if (const auto *R = dyn_cast<TypedValueRegion>(MV->getRegion())) {
@@ -93,10 +91,9 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
// (1) binds the next container value to 'element'. This creates a new
// node in the ExplodedGraph.
//
- // (2) binds the value 0/1 to the ObjCForCollectionStmt* itself, indicating
- // whether or not the container has any more elements. This value
- // will be tested in ProcessBranch. We need to explicitly bind
- // this value because a container can contain nil elements.
+  // (2) notes whether the collection has any more elements (or in other words,
+ // whether the loop has more iterations). This will be tested in
+ // processBranch.
//
// FIXME: Eventually this logic should actually do dispatches to
// 'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
diff --git a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index bc7c41d039c4..149459cf986a 100644
--- a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Analysis/IssueHash.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
@@ -23,8 +24,6 @@
#include "clang/Lex/Token.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
-#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
@@ -58,17 +57,18 @@ using namespace ento;
namespace {
class HTMLDiagnostics : public PathDiagnosticConsumer {
+ PathDiagnosticConsumerOptions DiagOpts;
std::string Directory;
bool createdDir = false;
bool noDir = false;
const Preprocessor &PP;
- AnalyzerOptions &AnalyzerOpts;
const bool SupportsCrossFileDiagnostics;
public:
- HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts, const std::string &OutputDir,
- const Preprocessor &pp, bool supportsMultipleFiles)
- : Directory(OutputDir), PP(pp), AnalyzerOpts(AnalyzerOpts),
+ HTMLDiagnostics(PathDiagnosticConsumerOptions DiagOpts,
+ const std::string &OutputDir, const Preprocessor &pp,
+ bool supportsMultipleFiles)
+ : DiagOpts(std::move(DiagOpts)), Directory(OutputDir), PP(pp),
SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
~HTMLDiagnostics() override { FlushDiagnostics(nullptr); }
@@ -133,7 +133,7 @@ private:
} // namespace
void ento::createHTMLDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &OutputDir, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
@@ -142,37 +142,38 @@ void ento::createHTMLDiagnosticConsumer(
// output mode. This doesn't make much sense, we should have the minimal text
// as our default. In the case of backward compatibility concerns, this could
// be preserved with -analyzer-config-compatibility-mode=true.
- createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputDir, PP, CTU);
+ createTextMinimalPathDiagnosticConsumer(DiagOpts, C, OutputDir, PP, CTU);
// TODO: Emit an error here.
if (OutputDir.empty())
return;
- C.push_back(new HTMLDiagnostics(AnalyzerOpts, OutputDir, PP, true));
+ C.push_back(new HTMLDiagnostics(std::move(DiagOpts), OutputDir, PP, true));
}
void ento::createHTMLSingleFileDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &OutputDir, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
+ createTextMinimalPathDiagnosticConsumer(DiagOpts, C, OutputDir, PP, CTU);
// TODO: Emit an error here.
if (OutputDir.empty())
return;
- C.push_back(new HTMLDiagnostics(AnalyzerOpts, OutputDir, PP, false));
- createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputDir, PP, CTU);
+ C.push_back(new HTMLDiagnostics(std::move(DiagOpts), OutputDir, PP, false));
}
void ento::createPlistHTMLDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &prefix, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
createHTMLDiagnosticConsumer(
- AnalyzerOpts, C, std::string(llvm::sys::path::parent_path(prefix)), PP,
+ DiagOpts, C, std::string(llvm::sys::path::parent_path(prefix)), PP,
CTU);
- createPlistMultiFileDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
- createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
+ createPlistMultiFileDiagnosticConsumer(DiagOpts, C, prefix, PP, CTU);
+ createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, prefix, PP,
+ CTU);
}
//===----------------------------------------------------------------------===//
@@ -245,7 +246,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
int FD;
SmallString<128> Model, ResultPath;
- if (!AnalyzerOpts.ShouldWriteStableReportFilename) {
+ if (!DiagOpts.ShouldWriteStableReportFilename) {
llvm::sys::path::append(Model, Directory, "report-%%%%%%.html");
if (std::error_code EC =
llvm::sys::fs::make_absolute(Model)) {
@@ -535,7 +536,7 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
<input type="checkbox" class="spoilerhider" id="showinvocation" />
<label for="showinvocation" >Show analyzer invocation</label>
<div class="spoiler">clang -cc1 )<<<";
- os << html::EscapeText(AnalyzerOpts.FullCompilerInvocation);
+ os << html::EscapeText(DiagOpts.ToolInvocation);
os << R"<<<(
</div>
<div id='tooltiphint' hidden="true">
@@ -582,8 +583,8 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
os << "\n<!-- FUNCTIONNAME " << declName << " -->\n";
os << "\n<!-- ISSUEHASHCONTENTOFLINEINCONTEXT "
- << GetIssueHash(SMgr, L, D.getCheckerName(), D.getBugType(),
- DeclWithIssue, PP.getLangOpts())
+ << getIssueHash(L, D.getCheckerName(), D.getBugType(), DeclWithIssue,
+ PP.getLangOpts())
<< " -->\n";
os << "\n<!-- BUGLINE "
@@ -786,8 +787,8 @@ void HTMLDiagnostics::HandlePiece(Rewriter &R, FileID BugFileID,
if (LPosInfo.first != BugFileID)
return;
- const llvm::MemoryBuffer *Buf = SM.getBuffer(LPosInfo.first);
- const char* FileStart = Buf->getBufferStart();
+ llvm::MemoryBufferRef Buf = SM.getBufferOrFake(LPosInfo.first);
+ const char *FileStart = Buf.getBufferStart();
// Compute the column number. Rewind from the current position to the start
// of the line.
@@ -797,7 +798,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter &R, FileID BugFileID,
// Compute LineEnd.
const char *LineEnd = TokInstantiationPtr;
- const char* FileEnd = Buf->getBufferEnd();
+ const char *FileEnd = Buf.getBufferEnd();
while (*LineEnd != '\n' && LineEnd != FileEnd)
++LineEnd;
diff --git a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index ed62778623a8..35e320c7755f 100644
--- a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Analysis/IssueHash.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/PlistSupport.h"
@@ -20,13 +21,12 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/TokenConcatenation.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
-#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
+#include <memory>
using namespace clang;
using namespace ento;
@@ -39,13 +39,17 @@ using namespace markup;
namespace {
class PlistDiagnostics : public PathDiagnosticConsumer {
+ PathDiagnosticConsumerOptions DiagOpts;
const std::string OutputFile;
const Preprocessor &PP;
const cross_tu::CrossTranslationUnitContext &CTU;
- AnalyzerOptions &AnOpts;
const bool SupportsCrossFileDiagnostics;
+
+ void printBugPath(llvm::raw_ostream &o, const FIDMap &FM,
+ const PathPieces &Path);
+
public:
- PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ PlistDiagnostics(PathDiagnosticConsumerOptions DiagOpts,
const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU,
bool supportsMultipleFiles);
@@ -74,23 +78,19 @@ namespace {
/// A helper class for emitting a single report.
class PlistPrinter {
const FIDMap& FM;
- AnalyzerOptions &AnOpts;
const Preprocessor &PP;
const cross_tu::CrossTranslationUnitContext &CTU;
llvm::SmallVector<const PathDiagnosticMacroPiece *, 0> MacroPieces;
public:
- PlistPrinter(const FIDMap& FM, AnalyzerOptions &AnOpts,
+ PlistPrinter(const FIDMap& FM,
const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU)
- : FM(FM), AnOpts(AnOpts), PP(PP), CTU(CTU) {
+ : FM(FM), PP(PP), CTU(CTU) {
}
void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P) {
ReportPiece(o, P, /*indent*/ 4, /*depth*/ 0, /*includeControlFlow*/ true);
-
- // Don't emit a warning about an unused private field.
- (void)AnOpts;
}
/// Print the expansions of the collected macro pieces.
@@ -165,11 +165,6 @@ struct ExpansionInfo {
} // end of anonymous namespace
-static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
- AnalyzerOptions &AnOpts, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU,
- const PathPieces &Path);
-
/// Print coverage information to output stream {@code o}.
/// May modify the used list of files {@code Fids} by inserting new ones.
static void printCoverage(const PathDiagnostic *D,
@@ -520,11 +515,53 @@ static void printCoverage(const PathDiagnostic *D,
assert(IndentLevel == InputIndentLevel);
}
-static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
- AnalyzerOptions &AnOpts, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU,
- const PathPieces &Path) {
- PlistPrinter Printer(FM, AnOpts, PP, CTU);
+//===----------------------------------------------------------------------===//
+// Methods of PlistDiagnostics.
+//===----------------------------------------------------------------------===//
+
+PlistDiagnostics::PlistDiagnostics(
+ PathDiagnosticConsumerOptions DiagOpts, const std::string &output,
+ const Preprocessor &PP, const cross_tu::CrossTranslationUnitContext &CTU,
+ bool supportsMultipleFiles)
+ : DiagOpts(std::move(DiagOpts)), OutputFile(output), PP(PP), CTU(CTU),
+ SupportsCrossFileDiagnostics(supportsMultipleFiles) {
+ // FIXME: Will be used by a later planned change.
+ (void)this->CTU;
+}
+
+void ento::createPlistDiagnosticConsumer(
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
+ const std::string &OutputFile, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+ // TODO: Emit an error here.
+ if (OutputFile.empty())
+ return;
+
+ C.push_back(new PlistDiagnostics(DiagOpts, OutputFile, PP, CTU,
+ /*supportsMultipleFiles=*/false));
+ createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, OutputFile,
+ PP, CTU);
+}
+
+void ento::createPlistMultiFileDiagnosticConsumer(
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
+ const std::string &OutputFile, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+ // TODO: Emit an error here.
+ if (OutputFile.empty())
+ return;
+
+ C.push_back(new PlistDiagnostics(DiagOpts, OutputFile, PP, CTU,
+ /*supportsMultipleFiles=*/true));
+ createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, OutputFile,
+ PP, CTU);
+}
+
+void PlistDiagnostics::printBugPath(llvm::raw_ostream &o, const FIDMap &FM,
+ const PathPieces &Path) {
+ PlistPrinter Printer(FM, PP, CTU);
assert(std::is_partitioned(Path.begin(), Path.end(),
[](const PathDiagnosticPieceRef &E) {
return E->getKind() == PathDiagnosticPiece::Note;
@@ -557,7 +594,7 @@ static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
o << " </array>\n";
- if (!AnOpts.ShouldDisplayMacroExpansions)
+ if (!DiagOpts.ShouldDisplayMacroExpansions)
return;
o << " <key>macro_expansions</key>\n"
@@ -566,48 +603,6 @@ static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
o << " </array>\n";
}
-//===----------------------------------------------------------------------===//
-// Methods of PlistDiagnostics.
-//===----------------------------------------------------------------------===//
-
-PlistDiagnostics::PlistDiagnostics(
- AnalyzerOptions &AnalyzerOpts, const std::string &output,
- const Preprocessor &PP, const cross_tu::CrossTranslationUnitContext &CTU,
- bool supportsMultipleFiles)
- : OutputFile(output), PP(PP), CTU(CTU), AnOpts(AnalyzerOpts),
- SupportsCrossFileDiagnostics(supportsMultipleFiles) {
- // FIXME: Will be used by a later planned change.
- (void)this->CTU;
-}
-
-void ento::createPlistDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &OutputFile, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
-
- // TODO: Emit an error here.
- if (OutputFile.empty())
- return;
-
- C.push_back(new PlistDiagnostics(AnalyzerOpts, OutputFile, PP, CTU,
- /*supportsMultipleFiles*/ false));
- createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputFile, PP, CTU);
-}
-
-void ento::createPlistMultiFileDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &OutputFile, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
-
- // TODO: Emit an error here.
- if (OutputFile.empty())
- return;
-
- C.push_back(new PlistDiagnostics(AnalyzerOpts, OutputFile, PP, CTU,
- /*supportsMultipleFiles*/ true));
- createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputFile, PP, CTU);
-}
-
void PlistDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) {
@@ -682,7 +677,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <dict>\n";
const PathDiagnostic *D = *DI;
- printBugPath(o, FM, AnOpts, PP, CTU, D->path);
+ printBugPath(o, FM, D->path);
// Output the bug type and bug category.
o << " <key>description</key>";
@@ -702,7 +697,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
: D->getLocation().asLocation()),
SM);
const Decl *DeclWithIssue = D->getDeclWithIssue();
- EmitString(o, GetIssueHash(SM, L, D->getCheckerName(), D->getBugType(),
+ EmitString(o, getIssueHash(L, D->getCheckerName(), D->getBugType(),
DeclWithIssue, LangOpts))
<< '\n';
@@ -806,7 +801,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
EmitString(o << " ", SM.getFileEntryForID(FID)->getName()) << '\n';
o << " </array>\n";
- if (llvm::AreStatisticsEnabled() && AnOpts.ShouldSerializeStats) {
+ if (llvm::AreStatisticsEnabled() && DiagOpts.ShouldSerializeStats) {
o << " <key>statistics</key>\n";
std::string stats;
llvm::raw_string_ostream os(stats);
@@ -825,22 +820,36 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
namespace {
-using ExpArgTokens = llvm::SmallVector<Token, 2>;
+using ArgTokensTy = llvm::SmallVector<Token, 2>;
+
+} // end of anonymous namespace
+
+LLVM_DUMP_METHOD static void dumpArgTokensToStream(llvm::raw_ostream &Out,
+ const Preprocessor &PP,
+ const ArgTokensTy &Toks);
-/// Maps unexpanded macro arguments to expanded arguments. A macro argument may
+namespace {
+/// Maps unexpanded macro parameters to expanded arguments. A macro argument may
/// need to be expanded further when it is nested inside another macro.
-class MacroArgMap : public std::map<const IdentifierInfo *, ExpArgTokens> {
+class MacroParamMap : public std::map<const IdentifierInfo *, ArgTokensTy> {
public:
- void expandFromPrevMacro(const MacroArgMap &Super);
+ void expandFromPrevMacro(const MacroParamMap &Super);
+
+ LLVM_DUMP_METHOD void dump(const Preprocessor &PP) const {
+ dumpToStream(llvm::errs(), PP);
+ }
+
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &Out,
+ const Preprocessor &PP) const;
};
-struct MacroNameAndArgs {
+struct MacroExpansionInfo {
std::string Name;
const MacroInfo *MI = nullptr;
- MacroArgMap Args;
+ MacroParamMap ParamMap;
- MacroNameAndArgs(std::string N, const MacroInfo *MI, MacroArgMap M)
- : Name(std::move(N)), MI(MI), Args(std::move(M)) {}
+ MacroExpansionInfo(std::string N, const MacroInfo *MI, MacroParamMap M)
+ : Name(std::move(N)), MI(MI), ParamMap(std::move(M)) {}
};
class TokenPrinter {
@@ -860,6 +869,46 @@ public:
void printToken(const Token &Tok);
};
+/// Wrapper around a Lexer object that can lex tokens one-by-one. It's possible
+/// to "inject" a range of tokens into the stream, in which case the next token
+/// is retrieved from the next element of the range, until the end of the range
+/// is reached.
+class TokenStream {
+public:
+ TokenStream(SourceLocation ExpanLoc, const SourceManager &SM,
+ const LangOptions &LangOpts)
+ : ExpanLoc(ExpanLoc) {
+ FileID File;
+ unsigned Offset;
+ std::tie(File, Offset) = SM.getDecomposedLoc(ExpanLoc);
+ llvm::MemoryBufferRef MB = SM.getBufferOrFake(File);
+ const char *MacroNameTokenPos = MB.getBufferStart() + Offset;
+
+ RawLexer = std::make_unique<Lexer>(SM.getLocForStartOfFile(File), LangOpts,
+ MB.getBufferStart(), MacroNameTokenPos,
+ MB.getBufferEnd());
+ }
+
+ void next(Token &Result) {
+ if (CurrTokenIt == TokenRange.end()) {
+ RawLexer->LexFromRawLexer(Result);
+ return;
+ }
+ Result = *CurrTokenIt;
+ CurrTokenIt++;
+ }
+
+ void injectRange(const ArgTokensTy &Range) {
+ TokenRange = Range;
+ CurrTokenIt = TokenRange.begin();
+ }
+
+ std::unique_ptr<Lexer> RawLexer;
+ ArgTokensTy TokenRange;
+ ArgTokensTy::iterator CurrTokenIt = TokenRange.begin();
+ SourceLocation ExpanLoc;
+};
+
} // end of anonymous namespace
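How the TokenStream above is meant to be driven (the names come from this patch, but treat the snippet as a sketch rather than an exact call site):

    TokenStream TStream(ExpanLoc, SM, LangOpts); // raw-lexes from ExpanLoc
    Token Tok;
    TStream.next(Tok);                 // served by the raw lexer
    TStream.injectRange(PrevParamMap.at(VariadicParamII));
    TStream.next(Tok);                 // now served from the injected range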
/// The implementation method of getMacroExpansion: It prints the expansion of
@@ -878,7 +927,7 @@ public:
///
/// As we expand the last line, we'll immediately replace PRINT(str) with
/// print(x). The information that both 'str' and 'x' refer to the same string
-/// is an information we have to forward, hence the argument \p PrevArgs.
+/// is information we have to forward, hence the argument \p PrevParamMap.
///
/// To avoid infinite recursion we maintain the already processed tokens in
/// a set. This is carried as a parameter through the recursive calls. The set
@@ -888,13 +937,11 @@ public:
/// #define f(y) x
/// #define x f(x)
static std::string getMacroNameAndPrintExpansion(
- TokenPrinter &Printer,
- SourceLocation MacroLoc,
- const Preprocessor &PP,
- const MacroArgMap &PrevArgs,
+ TokenPrinter &Printer, SourceLocation MacroLoc, const Preprocessor &PP,
+ const MacroParamMap &PrevParamMap,
llvm::SmallPtrSet<IdentifierInfo *, 8> &AlreadyProcessedTokens);
-/// Retrieves the name of the macro and what it's arguments expand into
+/// Retrieves the name of the macro and what its parameters expand into
/// at \p ExpanLoc.
///
/// For example, for the following macro expansion:
@@ -916,8 +963,9 @@ static std::string getMacroNameAndPrintExpansion(
/// When \p ExpanLoc references "SET_TO_NULL(a)" within the definition of
/// "NOT_SUSPICOUS", the macro name "SET_TO_NULL" and the MacroArgMap map
/// { (x, a) } will be returned.
-static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
- const Preprocessor &PP);
+static MacroExpansionInfo
+getMacroExpansionInfo(const MacroParamMap &PrevParamMap,
+ SourceLocation ExpanLoc, const Preprocessor &PP);
/// Retrieves the ')' token that matches '(' \p It points to.
static MacroInfo::tokens_iterator getMatchingRParen(
@@ -951,21 +999,20 @@ getExpandedMacro(SourceLocation MacroLoc, const Preprocessor &PP,
llvm::SmallPtrSet<IdentifierInfo*, 8> AlreadyProcessedTokens;
std::string MacroName = getMacroNameAndPrintExpansion(
- Printer, MacroLoc, *PPToUse, MacroArgMap{}, AlreadyProcessedTokens);
+ Printer, MacroLoc, *PPToUse, MacroParamMap{}, AlreadyProcessedTokens);
return {MacroName, std::string(OS.str())};
}
static std::string getMacroNameAndPrintExpansion(
- TokenPrinter &Printer,
- SourceLocation MacroLoc,
- const Preprocessor &PP,
- const MacroArgMap &PrevArgs,
+ TokenPrinter &Printer, SourceLocation MacroLoc, const Preprocessor &PP,
+ const MacroParamMap &PrevParamMap,
llvm::SmallPtrSet<IdentifierInfo *, 8> &AlreadyProcessedTokens) {
const SourceManager &SM = PP.getSourceManager();
- MacroNameAndArgs Info = getMacroNameAndArgs(SM.getExpansionLoc(MacroLoc), PP);
- IdentifierInfo* IDInfo = PP.getIdentifierInfo(Info.Name);
+ MacroExpansionInfo MExpInfo =
+ getMacroExpansionInfo(PrevParamMap, SM.getExpansionLoc(MacroLoc), PP);
+ IdentifierInfo *MacroNameII = PP.getIdentifierInfo(MExpInfo.Name);
// TODO: If the macro definition contains another symbol then this function is
// called recursively. In case this symbol is the one being defined, it will
@@ -973,18 +1020,18 @@ static std::string getMacroNameAndPrintExpansion(
// in this case we don't get the full expansion text in the Plist file. See
// the test file where "value" is expanded to "garbage_" instead of
// "garbage_value".
- if (!AlreadyProcessedTokens.insert(IDInfo).second)
- return Info.Name;
+ if (!AlreadyProcessedTokens.insert(MacroNameII).second)
+ return MExpInfo.Name;
- if (!Info.MI)
- return Info.Name;
+ if (!MExpInfo.MI)
+ return MExpInfo.Name;
// Manually expand its arguments from the previous macro.
- Info.Args.expandFromPrevMacro(PrevArgs);
+ MExpInfo.ParamMap.expandFromPrevMacro(PrevParamMap);
// Iterate over the macro's tokens and stringify them.
- for (auto It = Info.MI->tokens_begin(), E = Info.MI->tokens_end(); It != E;
- ++It) {
+ for (auto It = MExpInfo.MI->tokens_begin(), E = MExpInfo.MI->tokens_end();
+ It != E; ++It) {
Token T = *It;
// If this token is not an identifier, we only need to print it.
@@ -1000,8 +1047,8 @@ static std::string getMacroNameAndPrintExpansion(
// If this token is a macro that should be expanded inside the current
// macro.
if (getMacroInfoForLocation(PP, SM, II, T.getLocation())) {
- getMacroNameAndPrintExpansion(Printer, T.getLocation(), PP, Info.Args,
- AlreadyProcessedTokens);
+ getMacroNameAndPrintExpansion(Printer, T.getLocation(), PP,
+ MExpInfo.ParamMap, AlreadyProcessedTokens);
// If this is a function-like macro, skip its arguments, as
// getExpandedMacro() already printed them. If this is the case, let's
@@ -1013,10 +1060,10 @@ static std::string getMacroNameAndPrintExpansion(
}
// If this token is the current macro's argument, we should expand it.
- auto ArgMapIt = Info.Args.find(II);
- if (ArgMapIt != Info.Args.end()) {
- for (MacroInfo::tokens_iterator ArgIt = ArgMapIt->second.begin(),
- ArgEnd = ArgMapIt->second.end();
+ auto ParamToArgIt = MExpInfo.ParamMap.find(II);
+ if (ParamToArgIt != MExpInfo.ParamMap.end()) {
+ for (MacroInfo::tokens_iterator ArgIt = ParamToArgIt->second.begin(),
+ ArgEnd = ParamToArgIt->second.end();
ArgIt != ArgEnd; ++ArgIt) {
// These tokens may still be macros, if that is the case, handle it the
@@ -1034,7 +1081,8 @@ static std::string getMacroNameAndPrintExpansion(
}
getMacroNameAndPrintExpansion(Printer, ArgIt->getLocation(), PP,
- Info.Args, AlreadyProcessedTokens);
+ MExpInfo.ParamMap,
+ AlreadyProcessedTokens);
// Peek the next token if it is a tok::l_paren. This way we can decide
// if this is the application or just a reference to a function macro
// symbol:
@@ -1055,34 +1103,30 @@ static std::string getMacroNameAndPrintExpansion(
Printer.printToken(T);
}
- AlreadyProcessedTokens.erase(IDInfo);
+ AlreadyProcessedTokens.erase(MacroNameII);
- return Info.Name;
+ return MExpInfo.Name;
}
-static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
- const Preprocessor &PP) {
+static MacroExpansionInfo
+getMacroExpansionInfo(const MacroParamMap &PrevParamMap,
+ SourceLocation ExpanLoc, const Preprocessor &PP) {
const SourceManager &SM = PP.getSourceManager();
const LangOptions &LangOpts = PP.getLangOpts();
// First, we create a Lexer to lex *at the expansion location* the tokens
// referring to the macro's name and its arguments.
- std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(ExpanLoc);
- const llvm::MemoryBuffer *MB = SM.getBuffer(LocInfo.first);
- const char *MacroNameTokenPos = MB->getBufferStart() + LocInfo.second;
-
- Lexer RawLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
- MB->getBufferStart(), MacroNameTokenPos, MB->getBufferEnd());
+ TokenStream TStream(ExpanLoc, SM, LangOpts);
// Acquire the macro's name.
Token TheTok;
- RawLexer.LexFromRawLexer(TheTok);
+ TStream.next(TheTok);
std::string MacroName = PP.getSpelling(TheTok);
const auto *II = PP.getIdentifierInfo(MacroName);
- assert(II && "Failed to acquire the IndetifierInfo for the macro!");
+ assert(II && "Failed to acquire the IdentifierInfo for the macro!");
const MacroInfo *MI = getMacroInfoForLocation(PP, SM, II, ExpanLoc);
// assert(MI && "The macro must've been defined at it's expansion location!");
@@ -1094,18 +1138,18 @@ static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
if (!MI)
return { MacroName, MI, {} };
- // Acquire the macro's arguments.
+ // Acquire the macro's arguments at the expansion point.
//
// The rough idea here is to lex from the first left parentheses to the last
- // right parentheses, and map the macro's unexpanded arguments to what they
- // will be expanded to. An expanded macro argument may contain several tokens
- // (like '3 + 4'), so we'll lex until we find a tok::comma or tok::r_paren, at
- // which point we start lexing the next argument or finish.
- ArrayRef<const IdentifierInfo *> MacroArgs = MI->params();
- if (MacroArgs.empty())
+  // right parentheses, and map the macro's parameters to what they will be
+  // expanded to. A macro argument may contain several tokens (like '3 + 4'), so
+ // we'll lex until we find a tok::comma or tok::r_paren, at which point we
+ // start lexing the next argument or finish.
+ ArrayRef<const IdentifierInfo *> MacroParams = MI->params();
+ if (MacroParams.empty())
return { MacroName, MI, {} };
- RawLexer.LexFromRawLexer(TheTok);
+ TStream.next(TheTok);
// When this is a token which expands to another macro function then its
// parentheses are not at its expansion location. For example:
//
@@ -1117,9 +1161,9 @@ static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
if (TheTok.isNot(tok::l_paren))
return { MacroName, MI, {} };
- MacroArgMap Args;
+ MacroParamMap ParamMap;
- // When the macro's argument is a function call, like
+ // When the argument is a function call, like
// CALL_FN(someFunctionName(param1, param2))
// we will find tok::l_paren, tok::r_paren, and tok::comma that do not divide
// actual macro arguments, or do not represent the macro argument's closing
@@ -1130,12 +1174,19 @@ static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
// * > 1, then tok::comma is a part of the current arg.
int ParenthesesDepth = 1;
- // If we encounter __VA_ARGS__, we will lex until the closing tok::r_paren,
- // even if we lex a tok::comma and ParanthesesDepth == 1.
- const IdentifierInfo *__VA_ARGS__II = PP.getIdentifierInfo("__VA_ARGS__");
+ // If we encounter the variadic arg, we will lex until the closing
+  // tok::r_paren, even if we lex a tok::comma and ParenthesesDepth == 1.
+ const IdentifierInfo *VariadicParamII = PP.getIdentifierInfo("__VA_ARGS__");
+ if (MI->isGNUVarargs()) {
+    // If the macro uses GNU-style variadic args, the parameter name is
+    // user-supplied, and not "__VA_ARGS__". E.g.:
+ // #define FOO(a, b, myvargs...)
+ // In this case, just use the last parameter:
+ VariadicParamII = *(MacroParams.rbegin());
+ }
- for (const IdentifierInfo *UnexpArgII : MacroArgs) {
- MacroArgMap::mapped_type ExpandedArgTokens;
+ for (const IdentifierInfo *CurrParamII : MacroParams) {
+ MacroParamMap::mapped_type ArgTokens;
// One could also simply not supply a single argument to __VA_ARGS__ -- this
// results in a preprocessor warning, but is not an error:
@@ -1149,10 +1200,10 @@ static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
if (ParenthesesDepth != 0) {
// Lex the first token of the next macro parameter.
- RawLexer.LexFromRawLexer(TheTok);
+ TStream.next(TheTok);
- while (!(ParenthesesDepth == 1 &&
- (UnexpArgII == __VA_ARGS__II ? false : TheTok.is(tok::comma)))) {
+ while (CurrParamII == VariadicParamII || ParenthesesDepth != 1 ||
+ !TheTok.is(tok::comma)) {
assert(TheTok.isNot(tok::eof) &&
"EOF encountered while looking for expanded macro args!");
@@ -1165,24 +1216,51 @@ static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
if (ParenthesesDepth == 0)
break;
- if (TheTok.is(tok::raw_identifier))
+ if (TheTok.is(tok::raw_identifier)) {
PP.LookUpIdentifierInfo(TheTok);
+ // This token is a variadic parameter:
+ //
+ // #define PARAMS_RESOLVE_TO_VA_ARGS(i, fmt) foo(i, fmt); \
+ // i = 0;
+ // #define DISPATCH(...) \
+ // PARAMS_RESOLVE_TO_VA_ARGS(__VA_ARGS__);
+ // // ^~~~~~~~~~~ Variadic parameter here
+ //
+ // void multipleParamsResolveToVA_ARGS(void) {
+ // int x = 1;
+ // DISPATCH(x, "LF1M healer"); // Multiple arguments are mapped to
+ // // a single __VA_ARGS__ parameter.
+ // (void)(10 / x);
+ // }
+ //
+ // We will stumble across this while trying to expand
+ // PARAMS_RESOLVE_TO_VA_ARGS. By this point, we already noted during
+ // the processing of DISPATCH what __VA_ARGS__ maps to, so we'll
+ // retrieve the next series of tokens from that.
+ if (TheTok.getIdentifierInfo() == VariadicParamII) {
+ TStream.injectRange(PrevParamMap.at(VariadicParamII));
+ TStream.next(TheTok);
+ continue;
+ }
+ }
- ExpandedArgTokens.push_back(TheTok);
- RawLexer.LexFromRawLexer(TheTok);
+ ArgTokens.push_back(TheTok);
+ TStream.next(TheTok);
}
} else {
- assert(UnexpArgII == __VA_ARGS__II);
+ assert(CurrParamII == VariadicParamII &&
+ "No more macro arguments are found, but the current parameter "
+ "isn't the variadic arg!");
}
- Args.emplace(UnexpArgII, std::move(ExpandedArgTokens));
+ ParamMap.emplace(CurrParamII, std::move(ArgTokens));
}
assert(TheTok.is(tok::r_paren) &&
"Expanded macro argument acquisition failed! After the end of the loop"
" this token should be ')'!");
- return { MacroName, MI, Args };
+ return {MacroName, MI, ParamMap};
}
static MacroInfo::tokens_iterator getMatchingRParen(
@@ -1222,14 +1300,14 @@ static const MacroInfo *getMacroInfoForLocation(const Preprocessor &PP,
return MD->findDirectiveAtLoc(Loc, SM).getMacroInfo();
}
-void MacroArgMap::expandFromPrevMacro(const MacroArgMap &Super) {
+void MacroParamMap::expandFromPrevMacro(const MacroParamMap &Super) {
for (value_type &Pair : *this) {
- ExpArgTokens &CurrExpArgTokens = Pair.second;
+ ArgTokensTy &CurrArgTokens = Pair.second;
// For each token in the expanded macro argument.
- auto It = CurrExpArgTokens.begin();
- while (It != CurrExpArgTokens.end()) {
+ auto It = CurrArgTokens.begin();
+ while (It != CurrArgTokens.end()) {
if (It->isNot(tok::identifier)) {
++It;
continue;
@@ -1244,17 +1322,43 @@ void MacroArgMap::expandFromPrevMacro(const MacroArgMap &Super) {
continue;
}
- const ExpArgTokens &SuperExpArgTokens = Super.at(II);
+ const ArgTokensTy &SuperArgTokens = Super.at(II);
- It = CurrExpArgTokens.insert(
- It, SuperExpArgTokens.begin(), SuperExpArgTokens.end());
- std::advance(It, SuperExpArgTokens.size());
- It = CurrExpArgTokens.erase(It);
+ It = CurrArgTokens.insert(It, SuperArgTokens.begin(),
+ SuperArgTokens.end());
+ std::advance(It, SuperArgTokens.size());
+ It = CurrArgTokens.erase(It);
}
}
}
+void MacroParamMap::dumpToStream(llvm::raw_ostream &Out,
+ const Preprocessor &PP) const {
+  for (const auto &Pair : *this) {
+ Out << Pair.first->getName() << " -> ";
+ dumpArgTokensToStream(Out, PP, Pair.second);
+ Out << '\n';
+ }
+}
+
+static void dumpArgTokensToStream(llvm::raw_ostream &Out,
+ const Preprocessor &PP,
+ const ArgTokensTy &Toks) {
+ TokenPrinter Printer(Out, PP);
+ for (Token Tok : Toks)
+ Printer.printToken(Tok);
+}
+
void TokenPrinter::printToken(const Token &Tok) {
+  // TODO: Handle GNU extensions where hash and hashhash occur right before
+ // __VA_ARGS__.
+ // cppreference.com: "some compilers offer an extension that allows ## to
+ // appear after a comma and before __VA_ARGS__, in which case the ## does
+ // nothing when the variable arguments are present, but removes the comma when
+ // the variable arguments are not present: this makes it possible to define
+ // macros such as fprintf (stderr, format, ##__VA_ARGS__)"
+ // FIXME: Handle named variadic macro parameters (also a GNU extension).
+
// If this is the first token to be printed, don't print space.
if (PrevTok.isNot(tok::unknown)) {
// If the tokens were already space separated, or if they must be to avoid
diff --git a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 006a4006b7fc..1ccb0de92fba 100644
--- a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -582,9 +582,6 @@ bool ScanReachableSymbols::scan(SVal val) {
if (SymbolRef Sym = val.getAsSymbol())
return scan(Sym);
- if (const SymExpr *Sym = val.getAsSymbolicExpression())
- return scan(Sym);
-
if (Optional<nonloc::CompoundVal> X = val.getAs<nonloc::CompoundVal>())
return scan(*X);
diff --git a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index cb6f61e86ae3..a481bde1651b 100644
--- a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -89,7 +89,7 @@ public:
}
TriStateKind getCmpOpState(BinaryOperatorKind CurrentOP,
- BinaryOperatorKind QueriedOP) const {
+ BinaryOperatorKind QueriedOP) const {
return CmpOpTable[getIndexFromOp(CurrentOP)][getIndexFromOp(QueriedOP)];
}
@@ -364,6 +364,18 @@ RangeSet RangeSet::Negate(BasicValueFactory &BV, Factory &F) const {
return newRanges;
}
+RangeSet RangeSet::Delete(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Point) const {
+ llvm::APSInt Upper = Point;
+ llvm::APSInt Lower = Point;
+
+ ++Upper;
+ --Lower;
+
+ // Notice that the lower bound is greater than the upper bound.
+ return Intersect(BV, F, Upper, Lower);
+}
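A worked example for Delete() with hypothetical values:

    // Before: { [0, 10] }, Point = 5.
    // Delete() computes Upper = 6 and Lower = 4 and calls
    // Intersect(BV, F, Upper, Lower); the inverted bounds [6, 4] denote the
    // complement of [5, 5].
    // After:  { [0, 4], [6, 10] }.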
+
void RangeSet::print(raw_ostream &os) const {
bool isFirst = true;
os << "{ ";
@@ -379,7 +391,315 @@ void RangeSet::print(raw_ostream &os) const {
os << " }";
}
+REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(SymbolSet, SymbolRef)
+
namespace {
+class EquivalenceClass;
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(ClassMap, SymbolRef, EquivalenceClass)
+REGISTER_MAP_WITH_PROGRAMSTATE(ClassMembers, EquivalenceClass, SymbolSet)
+REGISTER_MAP_WITH_PROGRAMSTATE(ConstraintRange, EquivalenceClass, RangeSet)
+
+REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(ClassSet, EquivalenceClass)
+REGISTER_MAP_WITH_PROGRAMSTATE(DisequalityMap, EquivalenceClass, ClassSet)
+
+namespace {
+/// This class encapsulates a set of symbols equal to each other.
+///
+/// The main idea behind such classes is to narrow and share constraints
+/// between the symbols within a class. It also means that there is no
+/// practical need to store constraints for every member of the class
+/// separately.
+///
+/// Main terminology:
+///
+/// * "Equivalence class" is an object of this class, which can be efficiently
+///   compared to other classes. It represents the whole class without
+///   storing the actual members in it. The members of the class, however,
+///   can be retrieved from the state.
+///
+/// * "Class members" are the symbols corresponding to the class. This means
+/// that A == B for every member symbols A and B from the class. Members of
+/// each class are stored in the state.
+///
+/// * "Trivial class" is a class that has and ever had only one same symbol.
+///
+/// * "Merge operation" merges two classes into one. It is the main operation
+/// to produce non-trivial classes.
+/// If, at some point, we can assume that two symbols from two distinct
+/// classes are equal, we can merge these classes.
+class EquivalenceClass : public llvm::FoldingSetNode {
+public:
+ /// Find equivalence class for the given symbol in the given state.
+ LLVM_NODISCARD static inline EquivalenceClass find(ProgramStateRef State,
+ SymbolRef Sym);
+
+ /// Merge classes for the given symbols and return a new state.
+ LLVM_NODISCARD static inline ProgramStateRef
+ merge(BasicValueFactory &BV, RangeSet::Factory &F, ProgramStateRef State,
+ SymbolRef First, SymbolRef Second);
+ // Merge this class with the given class and return a new state.
+ LLVM_NODISCARD inline ProgramStateRef merge(BasicValueFactory &BV,
+ RangeSet::Factory &F,
+ ProgramStateRef State,
+ EquivalenceClass Other);
+
+ /// Return a set of class members for the given state.
+ LLVM_NODISCARD inline SymbolSet getClassMembers(ProgramStateRef State);
+ /// Return true if the current class is trivial in the given state.
+ LLVM_NODISCARD inline bool isTrivial(ProgramStateRef State);
+ /// Return true if the current class is trivial and its only member is dead.
+ LLVM_NODISCARD inline bool isTriviallyDead(ProgramStateRef State,
+ SymbolReaper &Reaper);
+
+ LLVM_NODISCARD static inline ProgramStateRef
+ markDisequal(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef State, SymbolRef First, SymbolRef Second);
+ LLVM_NODISCARD static inline ProgramStateRef
+ markDisequal(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef State, EquivalenceClass First,
+ EquivalenceClass Second);
+ LLVM_NODISCARD inline ProgramStateRef
+ markDisequal(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef State, EquivalenceClass Other) const;
+ LLVM_NODISCARD static inline ClassSet
+ getDisequalClasses(ProgramStateRef State, SymbolRef Sym);
+ LLVM_NODISCARD inline ClassSet
+ getDisequalClasses(ProgramStateRef State) const;
+ LLVM_NODISCARD inline ClassSet
+ getDisequalClasses(DisequalityMapTy Map, ClassSet::Factory &Factory) const;
+
+ LLVM_NODISCARD static inline Optional<bool>
+ areEqual(ProgramStateRef State, SymbolRef First, SymbolRef Second);
+
+ /// Check equivalence data for consistency.
+ LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED static bool
+ isClassDataConsistent(ProgramStateRef State);
+
+ LLVM_NODISCARD QualType getType() const {
+ return getRepresentativeSymbol()->getType();
+ }
+
+ EquivalenceClass() = delete;
+ EquivalenceClass(const EquivalenceClass &) = default;
+ EquivalenceClass &operator=(const EquivalenceClass &) = delete;
+ EquivalenceClass(EquivalenceClass &&) = default;
+ EquivalenceClass &operator=(EquivalenceClass &&) = delete;
+
+ bool operator==(const EquivalenceClass &Other) const {
+ return ID == Other.ID;
+ }
+ bool operator<(const EquivalenceClass &Other) const { return ID < Other.ID; }
+ bool operator!=(const EquivalenceClass &Other) const {
+ return !operator==(Other);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, uintptr_t CID) {
+ ID.AddInteger(CID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const { Profile(ID, this->ID); }
+
+private:
+ /* implicit */ EquivalenceClass(SymbolRef Sym)
+ : ID(reinterpret_cast<uintptr_t>(Sym)) {}
+
+ /// This function is intended to be used ONLY within the class.
+ /// The fact that ID is a pointer to a symbol is an implementation detail
+ /// and should stay that way.
+ /// In the current implementation, we use it to retrieve the only member
+ /// of the trivial class.
+ SymbolRef getRepresentativeSymbol() const {
+ return reinterpret_cast<SymbolRef>(ID);
+ }
+ static inline SymbolSet::Factory &getMembersFactory(ProgramStateRef State);
+
+ inline ProgramStateRef mergeImpl(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef State, SymbolSet Members,
+ EquivalenceClass Other,
+ SymbolSet OtherMembers);
+ static inline void
+ addToDisequalityInfo(DisequalityMapTy &Info, ConstraintRangeTy &Constraints,
+ BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef State, EquivalenceClass First,
+ EquivalenceClass Second);
+
+ /// This is a unique identifier of the class.
+ uintptr_t ID;
+};
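A sketch of the intended use of the class (the APIs are from this patch; the symbols A and B and the surrounding plumbing are assumed):

    // On assuming 'A == B', merge the two classes so constraints are shared:
    State = EquivalenceClass::merge(BV, F, State, A, B);
    // Later queries can be answered from the merged knowledge:
    if (Optional<bool> Eq = EquivalenceClass::areEqual(State, A, B))
      assert(*Eq && "A and B are now members of the same class");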
+
+//===----------------------------------------------------------------------===//
+// Constraint functions
+//===----------------------------------------------------------------------===//
+
+LLVM_NODISCARD inline const RangeSet *getConstraint(ProgramStateRef State,
+ EquivalenceClass Class) {
+ return State->get<ConstraintRange>(Class);
+}
+
+LLVM_NODISCARD inline const RangeSet *getConstraint(ProgramStateRef State,
+ SymbolRef Sym) {
+ return getConstraint(State, EquivalenceClass::find(State, Sym));
+}
+
+//===----------------------------------------------------------------------===//
+// Equality/disequality abstraction
+//===----------------------------------------------------------------------===//
+
+/// A small helper structure representing symbolic equality.
+///
+/// An equality check can have different forms (like a == b or a - b), and this
+/// class abstracts those forms away when all the user wants to know is
+/// whether the check is an equality or a disequality, while providing easy
+/// access to the compared symbols.
+struct EqualityInfo {
+public:
+ SymbolRef Left, Right;
+ // true for equality and false for disequality.
+ bool IsEquality = true;
+
+ void invert() { IsEquality = !IsEquality; }
+ /// Extract equality information from the given symbol and the constants.
+ ///
+ /// This function assumes an expression of the form Sym + Adjustment != Int.
+ /// That form is the default because the most widespread case of the
+ /// equality check is (A == B) + 0 != 0.
+ static Optional<EqualityInfo> extract(SymbolRef Sym, const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // As of now, the only equality form supported is Sym + 0 != 0.
+ if (!Int.isNullValue() || !Adjustment.isNullValue())
+ return llvm::None;
+
+ return extract(Sym);
+ }
+ /// Extract equality information from the given symbol.
+ static Optional<EqualityInfo> extract(SymbolRef Sym) {
+ return EqualityExtractor().Visit(Sym);
+ }
+
+private:
+ class EqualityExtractor
+ : public SymExprVisitor<EqualityExtractor, Optional<EqualityInfo>> {
+ public:
+ Optional<EqualityInfo> VisitSymSymExpr(const SymSymExpr *Sym) const {
+ switch (Sym->getOpcode()) {
+ case BO_Sub:
+ // This case is: A - B != 0 -> disequality check.
+ return EqualityInfo{Sym->getLHS(), Sym->getRHS(), false};
+ case BO_EQ:
+ // This case is: A == B != 0 -> equality check.
+ return EqualityInfo{Sym->getLHS(), Sym->getRHS(), true};
+ case BO_NE:
+ // This case is: A != B != 0 -> disequality check.
+ return EqualityInfo{Sym->getLHS(), Sym->getRHS(), false};
+ default:
+ return llvm::None;
+ }
+ }
+ };
+};
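+
+// Illustrative sketch (not part of the original patch): how extract()
+// classifies the supported forms; A and B stand for arbitrary symbols.
+//
+//   extract((A - B),  /*Int=*/0, /*Adjustment=*/0) -> {A, B, IsEquality = false}
+//   extract((A == B), /*Int=*/0, /*Adjustment=*/0) -> {A, B, IsEquality = true}
+//   extract((A != B), /*Int=*/0, /*Adjustment=*/0) -> {A, B, IsEquality = false}
+//   extract((A + B),  /*Int=*/0, /*Adjustment=*/0) -> llvm::None
+//   extract((A == B), /*Int=*/1, /*Adjustment=*/0) -> llvm::None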
+
+//===----------------------------------------------------------------------===//
+// Intersection functions
+//===----------------------------------------------------------------------===//
+
+template <class SecondTy, class... RestTy>
+LLVM_NODISCARD inline RangeSet intersect(BasicValueFactory &BV,
+ RangeSet::Factory &F, RangeSet Head,
+ SecondTy Second, RestTy... Tail);
+
+template <class... RangeTy> struct IntersectionTraits;
+
+template <class... TailTy> struct IntersectionTraits<RangeSet, TailTy...> {
+ // Found RangeSet, no need to check any further
+ using Type = RangeSet;
+};
+
+template <> struct IntersectionTraits<> {
+ // We ran out of types, and we didn't find any RangeSet, so the result should
+ // be optional.
+ using Type = Optional<RangeSet>;
+};
+
+template <class OptionalOrPointer, class... TailTy>
+struct IntersectionTraits<OptionalOrPointer, TailTy...> {
+ // If current type is Optional or a raw pointer, we should keep looking.
+ using Type = typename IntersectionTraits<TailTy...>::Type;
+};
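+
+// For illustration (not part of the original patch), the traits resolve as
+// follows; a RangeSet anywhere in the argument list pins the result type to
+// RangeSet, otherwise it degrades to Optional<RangeSet>:
+//
+//   static_assert(std::is_same<IntersectionTraits<RangeSet, const RangeSet *>::Type,
+//                              RangeSet>::value, "");
+//   static_assert(std::is_same<IntersectionTraits<const RangeSet *,
+//                                                 Optional<RangeSet>>::Type,
+//                              Optional<RangeSet>>::value, "");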
+
+template <class EndTy>
+LLVM_NODISCARD inline EndTy intersect(BasicValueFactory &BV,
+ RangeSet::Factory &F, EndTy End) {
+ // If the list contains only RangeSet or Optional<RangeSet>, simply return
+ // that range set.
+ return End;
+}
+
+LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED inline Optional<RangeSet>
+intersect(BasicValueFactory &BV, RangeSet::Factory &F, const RangeSet *End) {
+ // This is an extraneous conversion from a raw pointer into Optional<RangeSet>
+ if (End) {
+ return *End;
+ }
+ return llvm::None;
+}
+
+template <class... RestTy>
+LLVM_NODISCARD inline RangeSet intersect(BasicValueFactory &BV,
+ RangeSet::Factory &F, RangeSet Head,
+ RangeSet Second, RestTy... Tail) {
+ // Here we call either the <RangeSet,RangeSet,...> or <RangeSet,...> version
+ // of the function and can be sure that the result is RangeSet.
+ return intersect(BV, F, Head.Intersect(BV, F, Second), Tail...);
+}
+
+template <class SecondTy, class... RestTy>
+LLVM_NODISCARD inline RangeSet intersect(BasicValueFactory &BV,
+ RangeSet::Factory &F, RangeSet Head,
+ SecondTy Second, RestTy... Tail) {
+ if (Second) {
+ // Here we call the <RangeSet,RangeSet,...> version of the function...
+ return intersect(BV, F, Head, *Second, Tail...);
+ }
+ // ...and here it is either <RangeSet,RangeSet,...> or <RangeSet,...>, which
+ // means that the result is definitely RangeSet.
+ return intersect(BV, F, Head, Tail...);
+}
+
+/// Main generic intersect function.
+/// It intersects all of the given range sets. If some of the given arguments
+/// don't hold a range set (nullptr or llvm::None), the function will skip them.
+///
+/// Available representations for the arguments are:
+/// * RangeSet
+/// * Optional<RangeSet>
+/// * RangeSet *
+/// A pointer to a RangeSet is automatically assumed to be nullable and is
+/// checked just like the optional version. If this behavior is undesired,
+/// dereference the pointer at the call site.
+///
+/// The return type depends on the argument types. If we can be sure at
+/// compile time that the result will be a range set, the return type is
+/// simply RangeSet; in other cases we have to fall back to Optional<RangeSet>.
+///
+/// Please prefer optional range sets to raw pointers. If the last argument is
+/// a raw pointer and all previous arguments are None, it will cost one
+/// additional check to convert RangeSet * into Optional<RangeSet>.
+template <class HeadTy, class SecondTy, class... RestTy>
+LLVM_NODISCARD inline
+ typename IntersectionTraits<HeadTy, SecondTy, RestTy...>::Type
+ intersect(BasicValueFactory &BV, RangeSet::Factory &F, HeadTy Head,
+ SecondTy Second, RestTy... Tail) {
+ if (Head) {
+ return intersect(BV, F, *Head, Second, Tail...);
+ }
+ return intersect(BV, F, Second, Tail...);
+}
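+
+// A minimal usage sketch (not part of the original patch); the locals below
+// are hypothetical stand-ins for the three supported argument kinds.
+//
+//   RangeSet Type = ...;                   // always present
+//   const RangeSet *Stored = ...;          // may be null
+//   Optional<RangeSet> Negated = ...;      // may be None
+//
+//   // Type guarantees a RangeSet result at compile time; null/None arguments
+//   // are skipped at run time.
+//   RangeSet R = intersect(BV, F, Type, Stored, Negated);
+//
+//   // Without a guaranteed RangeSet argument, the result becomes optional.
+//   Optional<RangeSet> MaybeR = intersect(BV, F, Stored, Negated);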
+
+//===----------------------------------------------------------------------===//
+// Symbolic reasoning logic
+//===----------------------------------------------------------------------===//
/// A little component aggregating all of the reasoning we have about
/// the ranges of symbolic expressions.
@@ -389,10 +709,11 @@ namespace {
class SymbolicRangeInferrer
: public SymExprVisitor<SymbolicRangeInferrer, RangeSet> {
public:
+ template <class SourceType>
static RangeSet inferRange(BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef State, SymbolRef Sym) {
+ ProgramStateRef State, SourceType Origin) {
SymbolicRangeInferrer Inferrer(BV, F, State);
- return Inferrer.infer(Sym);
+ return Inferrer.infer(Origin);
}
RangeSet VisitSymExpr(SymbolRef Sym) {
@@ -442,37 +763,35 @@ private:
}
RangeSet infer(SymbolRef Sym) {
- const RangeSet *AssociatedRange = State->get<ConstraintRange>(Sym);
-
- // If Sym is a difference of symbols A - B, then maybe we have range set
- // stored for B - A.
- const RangeSet *RangeAssociatedWithNegatedSym =
- getRangeForMinusSymbol(State, Sym);
-
- // If we have range set stored for both A - B and B - A then calculate the
- // effective range set by intersecting the range set for A - B and the
- // negated range set of B - A.
- if (AssociatedRange && RangeAssociatedWithNegatedSym)
- return AssociatedRange->Intersect(
- ValueFactory, RangeFactory,
- RangeAssociatedWithNegatedSym->Negate(ValueFactory, RangeFactory));
-
- if (AssociatedRange)
- return *AssociatedRange;
-
- if (RangeAssociatedWithNegatedSym)
- return RangeAssociatedWithNegatedSym->Negate(ValueFactory, RangeFactory);
+ if (Optional<RangeSet> ConstraintBasedRange = intersect(
+ ValueFactory, RangeFactory, getConstraint(State, Sym),
+ // If Sym is a difference of symbols A - B, then maybe we have a
+ // range set stored for B - A.
+ //
+ // If we have range sets stored for both A - B and B - A then
+ // calculate the effective range set by intersecting the range set
+ // for A - B and the negated range set of B - A.
+ getRangeForNegatedSub(Sym), getRangeForEqualities(Sym))) {
+ return *ConstraintBasedRange;
+ }
// If Sym is a comparison expression (except <=>),
// find any other comparisons with the same operands.
// See function description.
- const RangeSet CmpRangeSet = getRangeForComparisonSymbol(State, Sym);
- if (!CmpRangeSet.isEmpty())
- return CmpRangeSet;
+ if (Optional<RangeSet> CmpRangeSet = getRangeForComparisonSymbol(Sym)) {
+ return *CmpRangeSet;
+ }
return Visit(Sym);
}
+ RangeSet infer(EquivalenceClass Class) {
+ if (const RangeSet *AssociatedConstraint = getConstraint(State, Class))
+ return *AssociatedConstraint;
+
+ return infer(Class.getType());
+ }
+
/// Infer range information solely from the type.
RangeSet infer(QualType T) {
// Lazily generate a new RangeSet representing all possible values for the
@@ -621,8 +940,7 @@ private:
/// Return a range set subtracting zero from \p Domain.
RangeSet assumeNonZero(RangeSet Domain, QualType T) {
APSIntType IntType = ValueFactory.getAPSIntType(T);
- return Domain.Intersect(ValueFactory, RangeFactory,
- ++IntType.getZeroValue(), --IntType.getZeroValue());
+ return Domain.Delete(ValueFactory, RangeFactory, IntType.getZeroValue());
}
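+
+ // For illustration (not part of the original patch): for a signed domain
+ // [-5, 5], assumeNonZero() yields [-5, -1] U [1, 5] -- the same result the
+ // replaced Intersect(++zero, --zero) wrap-around trick produced.
+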
// FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
@@ -630,23 +948,26 @@ private:
// symbol manually. This will allow us to support finding ranges of not
// only negated SymSymExpr-type expressions, but also of other, simpler
// expressions which we currently do not know how to negate.
- const RangeSet *getRangeForMinusSymbol(ProgramStateRef State, SymbolRef Sym) {
+ Optional<RangeSet> getRangeForNegatedSub(SymbolRef Sym) {
if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
if (SSE->getOpcode() == BO_Sub) {
QualType T = Sym->getType();
+
+ // Negation is handled only for integer and enumeration types.
+ if (!T->isUnsignedIntegerOrEnumerationType() &&
+ !T->isSignedIntegerOrEnumerationType())
+ return llvm::None;
+
SymbolManager &SymMgr = State->getSymbolManager();
- SymbolRef negSym =
+ SymbolRef NegatedSym =
SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), T);
- if (const RangeSet *negV = State->get<ConstraintRange>(negSym)) {
- // Unsigned range set cannot be negated, unless it is [0, 0].
- if (T->isUnsignedIntegerOrEnumerationType() ||
- T->isSignedIntegerOrEnumerationType())
- return negV;
+ if (const RangeSet *NegatedRange = getConstraint(State, NegatedSym)) {
+ return NegatedRange->Negate(ValueFactory, RangeFactory);
}
}
}
- return nullptr;
+ return llvm::None;
}
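+
+ // For illustration (not part of the original patch): for Sym = (A - B) of an
+ // integer type with a stored constraint (B - A) : [1, 5], this returns the
+ // negated set [-5, -1]; when nothing is stored for (B - A), it returns
+ // llvm::None.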
// Returns ranges only for binary comparison operators (except <=>)
@@ -659,18 +980,16 @@ private:
// It covers all possible combinations (see CmpOpTable description).
// Note that `x` and `y` can also stand for subexpressions,
// not only for actual symbols.
- RangeSet getRangeForComparisonSymbol(ProgramStateRef State, SymbolRef Sym) {
- const RangeSet EmptyRangeSet = RangeFactory.getEmptySet();
-
- auto SSE = dyn_cast<SymSymExpr>(Sym);
+ Optional<RangeSet> getRangeForComparisonSymbol(SymbolRef Sym) {
+ const auto *SSE = dyn_cast<SymSymExpr>(Sym);
if (!SSE)
- return EmptyRangeSet;
+ return llvm::None;
BinaryOperatorKind CurrentOP = SSE->getOpcode();
// We currently do not support <=> (C++20).
if (!BinaryOperator::isComparisonOp(CurrentOP) || (CurrentOP == BO_Cmp))
- return EmptyRangeSet;
+ return llvm::None;
static const OperatorRelationsTable CmpOpTable{};
@@ -679,10 +998,6 @@ private:
QualType T = SSE->getType();
SymbolManager &SymMgr = State->getSymbolManager();
- const llvm::APSInt &Zero = ValueFactory.getValue(0, T);
- const llvm::APSInt &One = ValueFactory.getValue(1, T);
- const RangeSet TrueRangeSet(RangeFactory, One, One);
- const RangeSet FalseRangeSet(RangeFactory, Zero, Zero);
int UnknownStates = 0;
@@ -693,7 +1008,7 @@ private:
// Let's find an expression e.g. (x < y).
BinaryOperatorKind QueriedOP = OperatorRelationsTable::getOpFromIndex(i);
const SymSymExpr *SymSym = SymMgr.getSymSymExpr(LHS, QueriedOP, RHS, T);
- const RangeSet *QueriedRangeSet = State->get<ConstraintRange>(SymSym);
+ const RangeSet *QueriedRangeSet = getConstraint(State, SymSym);
// If ranges were not previously found,
// try to find a reversed expression (y > x).
@@ -701,7 +1016,7 @@ private:
const BinaryOperatorKind ROP =
BinaryOperator::reverseComparisonOp(QueriedOP);
SymSym = SymMgr.getSymSymExpr(RHS, ROP, LHS, T);
- QueriedRangeSet = State->get<ConstraintRange>(SymSym);
+ QueriedRangeSet = getConstraint(State, SymSym);
}
if (!QueriedRangeSet || QueriedRangeSet->isEmpty())
@@ -732,11 +1047,38 @@ private:
continue;
}
- return (BranchState == OperatorRelationsTable::True) ? TrueRangeSet
- : FalseRangeSet;
+ return (BranchState == OperatorRelationsTable::True) ? getTrueRange(T)
+ : getFalseRange(T);
+ }
+
+ return llvm::None;
+ }
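+
+ // For illustration (not part of the original patch): if `x < y` is already
+ // constrained to be true and we query Sym = (x >= y), the loop above finds
+ // the stored constraint for the related operator and, via CmpOpTable,
+ // returns getFalseRange(T) without adding any new constraints.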
+
+ Optional<RangeSet> getRangeForEqualities(SymbolRef Sym) {
+ Optional<EqualityInfo> Equality = EqualityInfo::extract(Sym);
+
+ if (!Equality)
+ return llvm::None;
+
+ if (Optional<bool> AreEqual = EquivalenceClass::areEqual(
+ State, Equality->Left, Equality->Right)) {
+ if (*AreEqual == Equality->IsEquality) {
+ return getTrueRange(Sym->getType());
+ }
+ return getFalseRange(Sym->getType());
}
- return EmptyRangeSet;
+ return llvm::None;
+ }
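+
+ // For illustration (not part of the original patch): if `a` and `b` were
+ // merged into one equivalence class, then for Sym = (a == b) this returns
+ // the true range of Sym's type, while for Sym = (a != b) it returns [0, 0].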
+
+ RangeSet getTrueRange(QualType T) {
+ RangeSet TypeRange = infer(T);
+ return assumeNonZero(TypeRange, T);
+ }
+
+ RangeSet getFalseRange(QualType T) {
+ const llvm::APSInt &Zero = ValueFactory.getValue(0, T);
+ return RangeSet(RangeFactory, Zero);
}
BasicValueFactory &ValueFactory;
@@ -744,6 +1086,10 @@ private:
ProgramStateRef State;
};
+//===----------------------------------------------------------------------===//
+// Range-based reasoning about symbolic operations
+//===----------------------------------------------------------------------===//
+
template <>
RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Or>(Range LHS, Range RHS,
QualType T) {
@@ -904,6 +1250,10 @@ RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Rem>(Range LHS,
return {RangeFactory, ValueFactory.getValue(Min), ValueFactory.getValue(Max)};
}
+//===----------------------------------------------------------------------===//
+// Constraint manager implementation details
+//===----------------------------------------------------------------------===//
+
class RangeConstraintManager : public RangedConstraintManager {
public:
RangeConstraintManager(ExprEngine *EE, SValBuilder &SVB)
@@ -915,7 +1265,11 @@ public:
bool haveEqualConstraints(ProgramStateRef S1,
ProgramStateRef S2) const override {
- return S1->get<ConstraintRange>() == S2->get<ConstraintRange>();
+ // NOTE: ClassMembers are as simple as back pointers for ClassMap,
+ // so comparing constraint ranges and class maps should be
+ // sufficient.
+ return S1->get<ConstraintRange>() == S2->get<ConstraintRange>() &&
+ S1->get<ClassMap>() == S2->get<ClassMap>();
}
bool canReasonAbout(SVal X) const override;
@@ -971,6 +1325,7 @@ private:
RangeSet::Factory F;
RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
+ RangeSet getRange(ProgramStateRef State, EquivalenceClass Class);
RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
@@ -987,6 +1342,87 @@ private:
RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment);
+
+ //===------------------------------------------------------------------===//
+ // Equality tracking implementation
+ //===------------------------------------------------------------------===//
+
+ ProgramStateRef trackEQ(RangeSet NewConstraint, ProgramStateRef State,
+ SymbolRef Sym, const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ return track<true>(NewConstraint, State, Sym, Int, Adjustment);
+ }
+
+ ProgramStateRef trackNE(RangeSet NewConstraint, ProgramStateRef State,
+ SymbolRef Sym, const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ return track<false>(NewConstraint, State, Sym, Int, Adjustment);
+ }
+
+ template <bool EQ>
+ ProgramStateRef track(RangeSet NewConstraint, ProgramStateRef State,
+ SymbolRef Sym, const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ if (NewConstraint.isEmpty())
+ // This is an infeasible assumption.
+ return nullptr;
+
+ ProgramStateRef NewState = setConstraint(State, Sym, NewConstraint);
+ if (auto Equality = EqualityInfo::extract(Sym, Int, Adjustment)) {
+ // If the original assumption is not Sym + Adjustment !=/</> Int,
+ // we should invert IsEquality flag.
+ Equality->IsEquality = Equality->IsEquality != EQ;
+ return track(NewState, *Equality);
+ }
+
+ return NewState;
+ }
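+
+ // A worked example (not part of the original patch): assuming `(a == b) == 0`
+ // arrives via trackEQ (EQ == true), extract() yields {a, b, IsEquality = true}
+ // for the assumed `!= 0` form; the inversion above flips IsEquality to false,
+ // so we correctly record a disequality between `a` and `b`.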
+
+ ProgramStateRef track(ProgramStateRef State, EqualityInfo ToTrack) {
+ if (ToTrack.IsEquality) {
+ return trackEquality(State, ToTrack.Left, ToTrack.Right);
+ }
+ return trackDisequality(State, ToTrack.Left, ToTrack.Right);
+ }
+
+ ProgramStateRef trackDisequality(ProgramStateRef State, SymbolRef LHS,
+ SymbolRef RHS) {
+ return EquivalenceClass::markDisequal(getBasicVals(), F, State, LHS, RHS);
+ }
+
+ ProgramStateRef trackEquality(ProgramStateRef State, SymbolRef LHS,
+ SymbolRef RHS) {
+ return EquivalenceClass::merge(getBasicVals(), F, State, LHS, RHS);
+ }
+
+ LLVM_NODISCARD inline ProgramStateRef setConstraint(ProgramStateRef State,
+ EquivalenceClass Class,
+ RangeSet Constraint) {
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
+ ConstraintRangeTy::Factory &CF = State->get_context<ConstraintRange>();
+
+ // Add new constraint.
+ Constraints = CF.add(Constraints, Class, Constraint);
+
+ // There is a chance that we might need to update constraints for the
+ // classes that are known to be disequal to Class.
+ //
+ // In order for this to be even possible, the new constraint should
+ // be simply a constant because we can't reason about range disequalities.
+ if (const llvm::APSInt *Point = Constraint.getConcreteValue())
+ for (EquivalenceClass DisequalClass : Class.getDisequalClasses(State)) {
+ RangeSet UpdatedConstraint =
+ getRange(State, DisequalClass).Delete(getBasicVals(), F, *Point);
+ Constraints = CF.add(Constraints, DisequalClass, UpdatedConstraint);
+ }
+
+ return State->set<ConstraintRange>(Constraints);
+ }
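+
+ // For illustration (not part of the original patch): if Class gets
+ // constrained to the single point [5, 5] and some disequal class currently
+ // has [0, 10], the loop above refines that class to [0, 4] U [6, 10].
+ // Non-point constraints trigger no such update.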
+
+ LLVM_NODISCARD inline ProgramStateRef
+ setConstraint(ProgramStateRef State, SymbolRef Sym, RangeSet Constraint) {
+ return setConstraint(State, EquivalenceClass::find(State, Sym), Constraint);
+ }
};
} // end anonymous namespace
@@ -997,6 +1433,372 @@ ento::CreateRangeConstraintManager(ProgramStateManager &StMgr,
return std::make_unique<RangeConstraintManager>(Eng, StMgr.getSValBuilder());
}
+ConstraintMap ento::getConstraintMap(ProgramStateRef State) {
+ ConstraintMap::Factory &F = State->get_context<ConstraintMap>();
+ ConstraintMap Result = F.getEmptyMap();
+
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
+ for (std::pair<EquivalenceClass, RangeSet> ClassConstraint : Constraints) {
+ EquivalenceClass Class = ClassConstraint.first;
+ SymbolSet ClassMembers = Class.getClassMembers(State);
+ assert(!ClassMembers.isEmpty() &&
+ "Class must always have at least one member!");
+
+ SymbolRef Representative = *ClassMembers.begin();
+ Result = F.add(Result, Representative, ClassConstraint.second);
+ }
+
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// EquivalenceClass implementation details
+//===----------------------------------------------------------------------===//
+
+inline EquivalenceClass EquivalenceClass::find(ProgramStateRef State,
+ SymbolRef Sym) {
+ // We only store Symbol -> Class mappings for nontrivial classes.
+ if (const EquivalenceClass *NontrivialClass = State->get<ClassMap>(Sym))
+ return *NontrivialClass;
+
+ // Otherwise, Sym is in its own trivial class.
+ return Sym;
+}
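+
+// For illustration (not part of the original patch): a symbol that was never
+// merged with anything has no ClassMap entry, so find() returns the trivial
+// class whose ID is the symbol itself; getClassMembers() materializes that
+// class's singleton member set on demand.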
+
+inline ProgramStateRef EquivalenceClass::merge(BasicValueFactory &BV,
+ RangeSet::Factory &F,
+ ProgramStateRef State,
+ SymbolRef First,
+ SymbolRef Second) {
+ EquivalenceClass FirstClass = find(State, First);
+ EquivalenceClass SecondClass = find(State, Second);
+
+ return FirstClass.merge(BV, F, State, SecondClass);
+}
+
+inline ProgramStateRef EquivalenceClass::merge(BasicValueFactory &BV,
+ RangeSet::Factory &F,
+ ProgramStateRef State,
+ EquivalenceClass Other) {
+ // It is already the same class.
+ if (*this == Other)
+ return State;
+
+ // FIXME: As of now, we support only equivalence classes of the same type.
+ // This limitation is connected to the lack of explicit casts in
+ // our symbolic expression model.
+ //
+ // That means that for `int x` and `char y` we don't distinguish
+ // between these two very different cases:
+ // * `x == y`
+ // * `(char)x == y`
+ //
+ // The moment we introduce symbolic casts, this restriction can be
+ // lifted.
+ if (getType() != Other.getType())
+ return State;
+
+ SymbolSet Members = getClassMembers(State);
+ SymbolSet OtherMembers = Other.getClassMembers(State);
+
+ // We estimate the size of the class by the height of the tree containing
+ // its members. Merging is not a trivial operation, so it's easier to
+ // merge the smaller class into the bigger one.
+ if (Members.getHeight() >= OtherMembers.getHeight()) {
+ return mergeImpl(BV, F, State, Members, Other, OtherMembers);
+ } else {
+ return Other.mergeImpl(BV, F, State, OtherMembers, *this, Members);
+ }
+}
+
+inline ProgramStateRef
+EquivalenceClass::mergeImpl(BasicValueFactory &ValueFactory,
+ RangeSet::Factory &RangeFactory,
+ ProgramStateRef State, SymbolSet MyMembers,
+ EquivalenceClass Other, SymbolSet OtherMembers) {
+ // Essentially what we try to recreate here is some kind of union-find
+ // data structure. It does have certain limitations due to persistence
+ // and the need to remove elements from classes.
+ //
+ // In this setting, the EquivalenceClass object is the representative of the class
+ // or the parent element. ClassMap is a mapping of class members to their
+ // parent. Unlike the union-find structure, they all point directly to the
+ // class representative because we don't have an opportunity to actually do
+ // path compression when dealing with immutability. This means that we
+ // compress paths every time we do merges. It also means that we lose
+ // the main amortized complexity benefit from the original data structure.
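+ //
+ // A worked sketch (not part of the original patch): merging trivial class
+ // {a} with class {b, c} keeps the class with the taller member tree; the
+ // survivor ends up with members {a, b, c}, the ClassMap entries of the
+ // absorbed symbols are repointed at the survivor, and the two classes'
+ // constraints are intersected (step 1 below).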
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
+ ConstraintRangeTy::Factory &CRF = State->get_context<ConstraintRange>();
+
+ // 1. If the merged classes have any constraints associated with them, we
+ // need to transfer them to the class we have left.
+ //
+ // Intersection here makes perfect sense because both of these constraints
+ // must hold for the whole new class.
+ if (Optional<RangeSet> NewClassConstraint =
+ intersect(ValueFactory, RangeFactory, getConstraint(State, *this),
+ getConstraint(State, Other))) {
+ // NOTE: Essentially, NewClassConstraint should NEVER be infeasible because
+ // range inferrer shouldn't generate ranges incompatible with
+ // equivalence classes. However, at the moment, due to imperfections
+ // in the solver, it is possible, and the merge function can also
+ // return infeasible states (i.e. null states).
+ if (NewClassConstraint->isEmpty())
+ // Infeasible state
+ return nullptr;
+
+ // No need to track constraints of a now-dissolved class.
+ Constraints = CRF.remove(Constraints, Other);
+ // Assign new constraints for this class.
+ Constraints = CRF.add(Constraints, *this, *NewClassConstraint);
+
+ State = State->set<ConstraintRange>(Constraints);
+ }
+
+ // 2. Get ALL equivalence-related maps
+ ClassMapTy Classes = State->get<ClassMap>();
+ ClassMapTy::Factory &CMF = State->get_context<ClassMap>();
+
+ ClassMembersTy Members = State->get<ClassMembers>();
+ ClassMembersTy::Factory &MF = State->get_context<ClassMembers>();
+
+ DisequalityMapTy DisequalityInfo = State->get<DisequalityMap>();
+ DisequalityMapTy::Factory &DF = State->get_context<DisequalityMap>();
+
+ ClassSet::Factory &CF = State->get_context<ClassSet>();
+ SymbolSet::Factory &F = getMembersFactory(State);
+
+ // 3. Merge members of the Other class into the current class.
+ SymbolSet NewClassMembers = MyMembers;
+ for (SymbolRef Sym : OtherMembers) {
+ NewClassMembers = F.add(NewClassMembers, Sym);
+ // *this is now the class for all these new symbols.
+ Classes = CMF.add(Classes, Sym, *this);
+ }
+
+ // 4. Adjust the member mapping.
+ //
+ // No need to track members of a now-dissolved class.
+ Members = MF.remove(Members, Other);
+ // Now only the current class is mapped to all the symbols.
+ Members = MF.add(Members, *this, NewClassMembers);
+
+ // 5. Update disequality relations.
+ ClassSet DisequalToOther = Other.getDisequalClasses(DisequalityInfo, CF);
+ if (!DisequalToOther.isEmpty()) {
+ ClassSet DisequalToThis = getDisequalClasses(DisequalityInfo, CF);
+ DisequalityInfo = DF.remove(DisequalityInfo, Other);
+
+ for (EquivalenceClass DisequalClass : DisequalToOther) {
+ DisequalToThis = CF.add(DisequalToThis, DisequalClass);
+
+ // Disequality is a symmetric relation, meaning that if DisequalToOther
+ // is not empty, then the set for DisequalClass is not empty either and
+ // contains at least Other.
+ ClassSet OriginalSetLinkedToOther =
+ *DisequalityInfo.lookup(DisequalClass);
+
+ // Other will be eliminated and we should replace it with the bigger
+ // united class.
+ ClassSet NewSet = CF.remove(OriginalSetLinkedToOther, Other);
+ NewSet = CF.add(NewSet, *this);
+
+ DisequalityInfo = DF.add(DisequalityInfo, DisequalClass, NewSet);
+ }
+
+ DisequalityInfo = DF.add(DisequalityInfo, *this, DisequalToThis);
+ State = State->set<DisequalityMap>(DisequalityInfo);
+ }
+
+ // 6. Update the state.
+ State = State->set<ClassMap>(Classes);
+ State = State->set<ClassMembers>(Members);
+
+ return State;
+}
+
+inline SymbolSet::Factory &
+EquivalenceClass::getMembersFactory(ProgramStateRef State) {
+ return State->get_context<SymbolSet>();
+}
+
+SymbolSet EquivalenceClass::getClassMembers(ProgramStateRef State) {
+ if (const SymbolSet *Members = State->get<ClassMembers>(*this))
+ return *Members;
+
+ // This class is trivial, so we need to construct a set
+ // with just that one symbol from the class.
+ SymbolSet::Factory &F = getMembersFactory(State);
+ return F.add(F.getEmptySet(), getRepresentativeSymbol());
+}
+
+bool EquivalenceClass::isTrivial(ProgramStateRef State) {
+ return State->get<ClassMembers>(*this) == nullptr;
+}
+
+bool EquivalenceClass::isTriviallyDead(ProgramStateRef State,
+ SymbolReaper &Reaper) {
+ return isTrivial(State) && Reaper.isDead(getRepresentativeSymbol());
+}
+
+inline ProgramStateRef EquivalenceClass::markDisequal(BasicValueFactory &VF,
+ RangeSet::Factory &RF,
+ ProgramStateRef State,
+ SymbolRef First,
+ SymbolRef Second) {
+ return markDisequal(VF, RF, State, find(State, First), find(State, Second));
+}
+
+inline ProgramStateRef EquivalenceClass::markDisequal(BasicValueFactory &VF,
+ RangeSet::Factory &RF,
+ ProgramStateRef State,
+ EquivalenceClass First,
+ EquivalenceClass Second) {
+ return First.markDisequal(VF, RF, State, Second);
+}
+
+inline ProgramStateRef
+EquivalenceClass::markDisequal(BasicValueFactory &VF, RangeSet::Factory &RF,
+ ProgramStateRef State,
+ EquivalenceClass Other) const {
+ // If we know that two classes are equal, we can only produce an infeasible
+ // state.
+ if (*this == Other) {
+ return nullptr;
+ }
+
+ DisequalityMapTy DisequalityInfo = State->get<DisequalityMap>();
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
+
+ // Disequality is a symmetric relation, so if we mark A as disequal to B,
+ // we should also mark B as disequal to A.
+ addToDisequalityInfo(DisequalityInfo, Constraints, VF, RF, State, *this,
+ Other);
+ addToDisequalityInfo(DisequalityInfo, Constraints, VF, RF, State, Other,
+ *this);
+
+ State = State->set<DisequalityMap>(DisequalityInfo);
+ State = State->set<ConstraintRange>(Constraints);
+
+ return State;
+}
+
+inline void EquivalenceClass::addToDisequalityInfo(
+ DisequalityMapTy &Info, ConstraintRangeTy &Constraints,
+ BasicValueFactory &VF, RangeSet::Factory &RF, ProgramStateRef State,
+ EquivalenceClass First, EquivalenceClass Second) {
+
+ // 1. Get all of the required factories.
+ DisequalityMapTy::Factory &F = State->get_context<DisequalityMap>();
+ ClassSet::Factory &CF = State->get_context<ClassSet>();
+ ConstraintRangeTy::Factory &CRF = State->get_context<ConstraintRange>();
+
+ // 2. Add Second to the set of classes disequal to First.
+ const ClassSet *CurrentSet = Info.lookup(First);
+ ClassSet NewSet = CurrentSet ? *CurrentSet : CF.getEmptySet();
+ NewSet = CF.add(NewSet, Second);
+
+ Info = F.add(Info, First, NewSet);
+
+ // 3. If Second is known to be a constant, we can delete this point
+ //    from the constraint associated with First.
+ //
+ // So, if Second == 10, it means that First != 10.
+ // At the same time, the same logic does not apply to ranges.
+ if (const RangeSet *SecondConstraint = Constraints.lookup(Second))
+ if (const llvm::APSInt *Point = SecondConstraint->getConcreteValue()) {
+
+ RangeSet FirstConstraint = SymbolicRangeInferrer::inferRange(
+ VF, RF, State, First.getRepresentativeSymbol());
+
+ FirstConstraint = FirstConstraint.Delete(VF, RF, *Point);
+ Constraints = CRF.add(Constraints, First, FirstConstraint);
+ }
+}
+
+inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
+ SymbolRef FirstSym,
+ SymbolRef SecondSym) {
+ EquivalenceClass First = find(State, FirstSym);
+ EquivalenceClass Second = find(State, SecondSym);
+
+ // The same equivalence class => symbols are equal.
+ if (First == Second)
+ return true;
+
+ // Let's check if we know anything about these two classes being not equal to
+ // each other.
+ ClassSet DisequalToFirst = First.getDisequalClasses(State);
+ if (DisequalToFirst.contains(Second))
+ return false;
+
+ // It is not clear.
+ return llvm::None;
+}
+
+inline ClassSet EquivalenceClass::getDisequalClasses(ProgramStateRef State,
+ SymbolRef Sym) {
+ return find(State, Sym).getDisequalClasses(State);
+}
+
+inline ClassSet
+EquivalenceClass::getDisequalClasses(ProgramStateRef State) const {
+ return getDisequalClasses(State->get<DisequalityMap>(),
+ State->get_context<ClassSet>());
+}
+
+inline ClassSet
+EquivalenceClass::getDisequalClasses(DisequalityMapTy Map,
+ ClassSet::Factory &Factory) const {
+ if (const ClassSet *DisequalClasses = Map.lookup(*this))
+ return *DisequalClasses;
+
+ return Factory.getEmptySet();
+}
+
+bool EquivalenceClass::isClassDataConsistent(ProgramStateRef State) {
+ ClassMembersTy Members = State->get<ClassMembers>();
+
+ for (std::pair<EquivalenceClass, SymbolSet> ClassMembersPair : Members) {
+ for (SymbolRef Member : ClassMembersPair.second) {
+ // Every member of the class should have a mapping back to the class.
+ if (find(State, Member) == ClassMembersPair.first) {
+ continue;
+ }
+
+ return false;
+ }
+ }
+
+ DisequalityMapTy Disequalities = State->get<DisequalityMap>();
+ for (std::pair<EquivalenceClass, ClassSet> DisequalityInfo : Disequalities) {
+ EquivalenceClass Class = DisequalityInfo.first;
+ ClassSet DisequalClasses = DisequalityInfo.second;
+
+ // There is no use in keeping empty sets in the map.
+ if (DisequalClasses.isEmpty())
+ return false;
+
+ // Disequality is symmetric, i.e. for all classes A and B such that
+ // A != B holds, B != A should hold as well.
+ for (EquivalenceClass DisequalClass : DisequalClasses) {
+ const ClassSet *DisequalToDisequalClasses =
+ Disequalities.lookup(DisequalClass);
+
+ // It should be a set containing at least one element: Class.
+ if (!DisequalToDisequalClasses ||
+ !DisequalToDisequalClasses->contains(Class))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// RangeConstraintManager implementation
+//===----------------------------------------------------------------------===//
+
bool RangeConstraintManager::canReasonAbout(SVal X) const {
Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
if (SymVal && SymVal->isExpression()) {
@@ -1045,7 +1847,7 @@ bool RangeConstraintManager::canReasonAbout(SVal X) const {
ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
SymbolRef Sym) {
- const RangeSet *Ranges = State->get<ConstraintRange>(Sym);
+ const RangeSet *Ranges = getConstraint(State, Sym);
// If we don't have any information about this symbol, it's underconstrained.
if (!Ranges)
@@ -1069,28 +1871,148 @@ ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
const llvm::APSInt *RangeConstraintManager::getSymVal(ProgramStateRef St,
SymbolRef Sym) const {
- const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(Sym);
+ const RangeSet *T = getConstraint(St, Sym);
return T ? T->getConcreteValue() : nullptr;
}
+//===----------------------------------------------------------------------===//
+// Remove dead symbols from existing constraints
+//===----------------------------------------------------------------------===//
+
/// Scan all symbols referenced by the constraints. If a symbol is not alive
/// as marked by the SymbolReaper, remove it from the constraint-related maps.
ProgramStateRef
RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
SymbolReaper &SymReaper) {
- bool Changed = false;
- ConstraintRangeTy CR = State->get<ConstraintRange>();
- ConstraintRangeTy::Factory &CRFactory = State->get_context<ConstraintRange>();
+ ClassMembersTy ClassMembersMap = State->get<ClassMembers>();
+ ClassMembersTy NewClassMembersMap = ClassMembersMap;
+ ClassMembersTy::Factory &EMFactory = State->get_context<ClassMembers>();
+ SymbolSet::Factory &SetFactory = State->get_context<SymbolSet>();
+
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
+ ConstraintRangeTy NewConstraints = Constraints;
+ ConstraintRangeTy::Factory &ConstraintFactory =
+ State->get_context<ConstraintRange>();
+
+ ClassMapTy Map = State->get<ClassMap>();
+ ClassMapTy NewMap = Map;
+ ClassMapTy::Factory &ClassFactory = State->get_context<ClassMap>();
+
+ DisequalityMapTy Disequalities = State->get<DisequalityMap>();
+ DisequalityMapTy::Factory &DisequalityFactory =
+ State->get_context<DisequalityMap>();
+ ClassSet::Factory &ClassSetFactory = State->get_context<ClassSet>();
+
+ bool ClassMapChanged = false;
+ bool MembersMapChanged = false;
+ bool ConstraintMapChanged = false;
+ bool DisequalitiesChanged = false;
+
+ auto removeDeadClass = [&](EquivalenceClass Class) {
+ // Remove associated constraint ranges.
+ Constraints = ConstraintFactory.remove(Constraints, Class);
+ ConstraintMapChanged = true;
+
+ // Update the disequality information so that it no longer mentions the
+ // removed class.
+ ClassSet DisequalClasses =
+ Class.getDisequalClasses(Disequalities, ClassSetFactory);
+ if (!DisequalClasses.isEmpty()) {
+ for (EquivalenceClass DisequalClass : DisequalClasses) {
+ ClassSet DisequalToDisequalSet =
+ DisequalClass.getDisequalClasses(Disequalities, ClassSetFactory);
+ // DisequalToDisequalSet is guaranteed to be non-empty for consistent
+ // disequality info.
+ assert(!DisequalToDisequalSet.isEmpty());
+ ClassSet NewSet = ClassSetFactory.remove(DisequalToDisequalSet, Class);
+
+ // No need to keep an empty set.
+ if (NewSet.isEmpty()) {
+ Disequalities =
+ DisequalityFactory.remove(Disequalities, DisequalClass);
+ } else {
+ Disequalities =
+ DisequalityFactory.add(Disequalities, DisequalClass, NewSet);
+ }
+ }
+ // Remove the data for the class
+ Disequalities = DisequalityFactory.remove(Disequalities, Class);
+ DisequalitiesChanged = true;
+ }
+ };
+
+ // 1. Let's see if dead symbols are trivial and have associated constraints.
+ for (std::pair<EquivalenceClass, RangeSet> ClassConstraintPair :
+ Constraints) {
+ EquivalenceClass Class = ClassConstraintPair.first;
+ if (Class.isTriviallyDead(State, SymReaper)) {
+ // If this class is trivial, we can remove its constraints right away.
+ removeDeadClass(Class);
+ }
+ }
+
+ // 2. We don't need to track classes for dead symbols.
+ for (std::pair<SymbolRef, EquivalenceClass> SymbolClassPair : Map) {
+ SymbolRef Sym = SymbolClassPair.first;
- for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
- SymbolRef Sym = I.getKey();
if (SymReaper.isDead(Sym)) {
- Changed = true;
- CR = CRFactory.remove(CR, Sym);
+ ClassMapChanged = true;
+ NewMap = ClassFactory.remove(NewMap, Sym);
}
}
- return Changed ? State->set<ConstraintRange>(CR) : State;
+ // 3. Remove dead members from classes and remove dead non-trivial classes
+ // and their constraints.
+ for (std::pair<EquivalenceClass, SymbolSet> ClassMembersPair :
+ ClassMembersMap) {
+ EquivalenceClass Class = ClassMembersPair.first;
+ SymbolSet LiveMembers = ClassMembersPair.second;
+ bool MembersChanged = false;
+
+ for (SymbolRef Member : ClassMembersPair.second) {
+ if (SymReaper.isDead(Member)) {
+ MembersChanged = true;
+ LiveMembers = SetFactory.remove(LiveMembers, Member);
+ }
+ }
+
+ // Check if the class changed.
+ if (!MembersChanged)
+ continue;
+
+ MembersMapChanged = true;
+
+ if (LiveMembers.isEmpty()) {
+ // The class is dead now; we need to wipe it out of the members map...
+ NewClassMembersMap = EMFactory.remove(NewClassMembersMap, Class);
+
+ // ...and remove all of its constraints.
+ removeDeadClass(Class);
+ } else {
+ // We need to change the members associated with the class.
+ NewClassMembersMap =
+ EMFactory.add(NewClassMembersMap, Class, LiveMembers);
+ }
+ }
+
+ // 4. Update the state with new maps.
+ //
+ // Here we try to be humble and update a map only if it really changed.
+ if (ClassMapChanged)
+ State = State->set<ClassMap>(NewMap);
+
+ if (MembersMapChanged)
+ State = State->set<ClassMembers>(NewClassMembersMap);
+
+ if (ConstraintMapChanged)
+ State = State->set<ConstraintRange>(Constraints);
+
+ if (DisequalitiesChanged)
+ State = State->set<DisequalityMap>(Disequalities);
+
+ assert(EquivalenceClass::isClassDataConsistent(State));
+
+ return State;
}
RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
@@ -1098,6 +2020,11 @@ RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
return SymbolicRangeInferrer::inferRange(getBasicVals(), F, State, Sym);
}
+RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
+ EquivalenceClass Class) {
+ return SymbolicRangeInferrer::inferRange(getBasicVals(), F, State, Class);
+}
+
//===------------------------------------------------------------------------===
// assumeSymX methods: protected interface for RangeConstraintManager.
//===------------------------------------------------------------------------===/
@@ -1119,15 +2046,11 @@ RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
if (AdjustmentType.testInRange(Int, true) != APSIntType::RTR_Within)
return St;
- llvm::APSInt Lower = AdjustmentType.convert(Int) - Adjustment;
- llvm::APSInt Upper = Lower;
- --Lower;
- ++Upper;
+ llvm::APSInt Point = AdjustmentType.convert(Int) - Adjustment;
- // [Int-Adjustment+1, Int-Adjustment-1]
- // Notice that the lower bound is greater than the upper bound.
- RangeSet New = getRange(St, Sym).Intersect(getBasicVals(), F, Upper, Lower);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ RangeSet New = getRange(St, Sym).Delete(getBasicVals(), F, Point);
+
+ return trackNE(New, St, Sym, Int, Adjustment);
}
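+
+// For illustration (not part of the original patch): with Int = 5 and
+// Adjustment = 0, the old code intersected with the wrap-around range [6, 4],
+// while Delete() removes the single point 5 directly. trackNE() additionally
+// records a symbolic disequality, but only when Int and Adjustment are both
+// zero and Sym has one of the supported `(A op B) != 0` shapes.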
ProgramStateRef
@@ -1142,7 +2065,8 @@ RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
// [Int-Adjustment, Int-Adjustment]
llvm::APSInt AdjInt = AdjustmentType.convert(Int) - Adjustment;
RangeSet New = getRange(St, Sym).Intersect(getBasicVals(), F, AdjInt, AdjInt);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+
+ return trackEQ(New, St, Sym, Int, Adjustment);
}
RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
@@ -1178,7 +2102,7 @@ RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymLTRange(St, Sym, Int, Adjustment);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ return trackNE(New, St, Sym, Int, Adjustment);
}
RangeSet RangeConstraintManager::getSymGTRange(ProgramStateRef St,
@@ -1214,7 +2138,7 @@ RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymGTRange(St, Sym, Int, Adjustment);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ return trackNE(New, St, Sym, Int, Adjustment);
}
RangeSet RangeConstraintManager::getSymGERange(ProgramStateRef St,
@@ -1250,13 +2174,13 @@ RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymGERange(St, Sym, Int, Adjustment);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ return New.isEmpty() ? nullptr : setConstraint(St, Sym, New);
}
-RangeSet RangeConstraintManager::getSymLERange(
- llvm::function_ref<RangeSet()> RS,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet
+RangeConstraintManager::getSymLERange(llvm::function_ref<RangeSet()> RS,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
@@ -1293,7 +2217,7 @@ RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymLERange(St, Sym, Int, Adjustment);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ return New.isEmpty() ? nullptr : setConstraint(St, Sym, New);
}
ProgramStateRef RangeConstraintManager::assumeSymWithinInclusiveRange(
@@ -1303,7 +2227,7 @@ ProgramStateRef RangeConstraintManager::assumeSymWithinInclusiveRange(
if (New.isEmpty())
return nullptr;
RangeSet Out = getSymLERange([&] { return New; }, To, Adjustment);
- return Out.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, Out);
+ return Out.isEmpty() ? nullptr : setConstraint(State, Sym, Out);
}
ProgramStateRef RangeConstraintManager::assumeSymOutsideInclusiveRange(
@@ -1312,7 +2236,7 @@ ProgramStateRef RangeConstraintManager::assumeSymOutsideInclusiveRange(
RangeSet RangeLT = getSymLTRange(State, Sym, From, Adjustment);
RangeSet RangeGT = getSymGTRange(State, Sym, To, Adjustment);
RangeSet New(RangeLT.addRange(F, RangeGT));
- return New.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, New);
+ return New.isEmpty() ? nullptr : setConstraint(State, Sym, New);
}
//===----------------------------------------------------------------------===//
@@ -1332,17 +2256,25 @@ void RangeConstraintManager::printJson(raw_ostream &Out, ProgramStateRef State,
++Space;
Out << '[' << NL;
- for (ConstraintRangeTy::iterator I = Constraints.begin();
- I != Constraints.end(); ++I) {
- Indent(Out, Space, IsDot)
- << "{ \"symbol\": \"" << I.getKey() << "\", \"range\": \"";
- I.getData().print(Out);
- Out << "\" }";
-
- if (std::next(I) != Constraints.end())
- Out << ',';
- Out << NL;
+ bool First = true;
+ for (std::pair<EquivalenceClass, RangeSet> P : Constraints) {
+ SymbolSet ClassMembers = P.first.getClassMembers(State);
+
+ // We can print the same constraint for every class member.
+ for (SymbolRef ClassMember : ClassMembers) {
+ if (First) {
+ First = false;
+ } else {
+ Out << ',';
+ Out << NL;
+ }
+ Indent(Out, Space, IsDot)
+ << "{ \"symbol\": \"" << ClassMember << "\", \"range\": \"";
+ P.second.print(Out);
+ Out << "\" }";
+ }
}
+ Out << NL;
--Space;
Indent(Out, Space, IsDot) << "]," << NL;
diff --git a/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index 4748c106eb55..e7a03e6ed582 100644
--- a/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -40,19 +40,20 @@ ProgramStateRef RangedConstraintManager::assumeSym(ProgramStateRef State,
}
} else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
- // Translate "a != b" to "(b - a) != 0".
- // We invert the order of the operands as a heuristic for how loop
- // conditions are usually written ("begin != end") as compared to length
- // calculations ("end - begin"). The more correct thing to do would be to
- // canonicalize "a - b" and "b - a", which would allow us to treat
- // "a != b" and "b != a" the same.
- SymbolManager &SymMgr = getSymbolManager();
BinaryOperator::Opcode Op = SSE->getOpcode();
assert(BinaryOperator::isComparisonOp(Op));
- // For now, we only support comparing pointers.
+ // We convert equality operations for pointers only.
if (Loc::isLocType(SSE->getLHS()->getType()) &&
Loc::isLocType(SSE->getRHS()->getType())) {
+ // Translate "a != b" to "(b - a) != 0".
+ // We invert the order of the operands as a heuristic for how loop
+ // conditions are usually written ("begin != end") as compared to length
+ // calculations ("end - begin"). The more correct thing to do would be to
+ // canonicalize "a - b" and "b - a", which would allow us to treat
+ // "a != b" and "b != a" the same.
+
+ SymbolManager &SymMgr = getSymbolManager();
QualType DiffTy = SymMgr.getContext().getPointerDiffType();
SymbolRef Subtraction =
SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
@@ -63,6 +64,25 @@ ProgramStateRef RangedConstraintManager::assumeSym(ProgramStateRef State,
Op = BinaryOperator::negateComparisonOp(Op);
return assumeSymRel(State, Subtraction, Op, Zero);
}
+
+ if (BinaryOperator::isEqualityOp(Op)) {
+ SymbolManager &SymMgr = getSymbolManager();
+
+ QualType ExprType = SSE->getType();
+ SymbolRef CanonicalEquality =
+ SymMgr.getSymSymExpr(SSE->getLHS(), BO_EQ, SSE->getRHS(), ExprType);
+
+ bool WasEqual = SSE->getOpcode() == BO_EQ;
+ bool IsExpectedEqual = WasEqual == Assumption;
+
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, ExprType);
+
+ if (IsExpectedEqual) {
+ return assumeSymNE(State, CanonicalEquality, Zero, Zero);
+ }
+
+ return assumeSymEQ(State, CanonicalEquality, Zero, Zero);
+ }
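+
+ // A worked example (not part of the original patch): assuming `a != b`
+ // holds, Op == BO_NE gives WasEqual == false, so with Assumption == true
+ // IsExpectedEqual is false and we record `(a == b) == 0` -- the canonical
+ // form of the disequality.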
}
// If we get here, there's nothing else we can do but treat the symbol as
@@ -199,11 +219,6 @@ void RangedConstraintManager::computeAdjustment(SymbolRef &Sym,
}
}
-void *ProgramStateTrait<ConstraintRange>::GDMIndex() {
- static int Index;
- return &Index;
-}
-
} // end of namespace ento
} // end of namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index c00a2c8ba8a2..72b8ada1dfab 100644
--- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -236,10 +236,11 @@ SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
return nonloc::SymbolVal(sym);
}
-DefinedSVal SValBuilder::getMemberPointer(const DeclaratorDecl *DD) {
- assert(!DD || isa<CXXMethodDecl>(DD) || isa<FieldDecl>(DD));
+DefinedSVal SValBuilder::getMemberPointer(const NamedDecl *ND) {
+ assert(!ND || isa<CXXMethodDecl>(ND) || isa<FieldDecl>(ND) ||
+ isa<IndirectFieldDecl>(ND));
- if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(DD)) {
+ if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(ND)) {
// Sema treats pointers to static member functions as having function pointer
// type, so return a function pointer for the method.
// We don't need to play a similar trick for static member fields
@@ -249,7 +250,7 @@ DefinedSVal SValBuilder::getMemberPointer(const DeclaratorDecl *DD) {
return getFunctionPointer(MD);
}
- return nonloc::PointerToMember(DD);
+ return nonloc::PointerToMember(ND);
}
DefinedSVal SValBuilder::getFunctionPointer(const FunctionDecl *func) {
@@ -305,6 +306,14 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return makeLoc(getRegionManager().getStringRegion(SL));
}
+ case Stmt::PredefinedExprClass: {
+ const auto *PE = cast<PredefinedExpr>(E);
+ assert(PE->getFunctionName() &&
+ "Since we analyze only instantiated functions, PredefinedExpr "
+ "should have a function name.");
+ return makeLoc(getRegionManager().getStringRegion(PE->getFunctionName()));
+ }
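+
+ // For illustration (not part of the original patch): inside `void foo()`,
+ // the expression `__func__` is a PredefinedExpr whose function name is the
+ // string literal "foo", so it folds to that literal's region.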
+
// Fast-path some expressions to avoid the overhead of going through the AST's
// constant evaluator
case Stmt::CharacterLiteralClass: {
@@ -377,8 +386,8 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
SVal SValBuilder::makeSymExprValNN(BinaryOperator::Opcode Op,
NonLoc LHS, NonLoc RHS,
QualType ResultTy) {
- const SymExpr *symLHS = LHS.getAsSymExpr();
- const SymExpr *symRHS = RHS.getAsSymExpr();
+ SymbolRef symLHS = LHS.getAsSymbol();
+ SymbolRef symRHS = RHS.getAsSymbol();
// TODO: When the Max Complexity is reached, we should conjure a symbol
// instead of generating an Unknown value and propagate the taint info to it.
@@ -492,7 +501,7 @@ SVal SValBuilder::evalIntegralCast(ProgramStateRef state, SVal val,
if (getContext().getTypeSize(castTy) >= getContext().getTypeSize(originalTy))
return evalCast(val, castTy, originalTy);
- const SymExpr *se = val.getAsSymbolicExpression();
+ SymbolRef se = val.getAsSymbol();
if (!se) // Let evalCast handle non symbolic expressions.
return evalCast(val, castTy, originalTy);
diff --git a/clang/lib/StaticAnalyzer/Core/SVals.cpp b/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 9b5de6c3eb92..252596887e4f 100644
--- a/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -84,16 +84,12 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
/// the first symbolic parent region is returned.
SymbolRef SVal::getAsLocSymbol(bool IncludeBaseRegions) const {
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
- if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
- return X->getLoc().getAsLocSymbol(IncludeBaseRegions);
-
- if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
- const MemRegion *R = X->getRegion();
- if (const SymbolicRegion *SymR = IncludeBaseRegions ?
- R->getSymbolicBase() :
- dyn_cast<SymbolicRegion>(R->StripCasts()))
+ if (const MemRegion *R = getAsRegion())
+ if (const SymbolicRegion *SymR =
+ IncludeBaseRegions ? R->getSymbolicBase()
+ : dyn_cast<SymbolicRegion>(R->StripCasts()))
return SymR->getSymbol();
- }
+
return nullptr;
}
@@ -116,8 +112,6 @@ SymbolRef SVal::getLocSymbolInBase() const {
return nullptr;
}
-// TODO: The next 3 functions have to be simplified.
-
/// If this SVal wraps a symbol return that SymbolRef.
/// Otherwise, return 0.
///
@@ -132,22 +126,6 @@ SymbolRef SVal::getAsSymbol(bool IncludeBaseRegions) const {
return getAsLocSymbol(IncludeBaseRegions);
}
-/// getAsSymbolicExpression - If this Sval wraps a symbolic expression then
-/// return that expression. Otherwise return NULL.
-const SymExpr *SVal::getAsSymbolicExpression() const {
- if (Optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
- return X->getSymbol();
-
- return getAsSymbol();
-}
-
-const SymExpr* SVal::getAsSymExpr() const {
- const SymExpr* Sym = getAsSymbol();
- if (!Sym)
- Sym = getAsSymbolicExpression();
- return Sym;
-}
-
const MemRegion *SVal::getAsRegion() const {
if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>())
return X->getRegion();
@@ -175,18 +153,18 @@ bool nonloc::PointerToMember::isNullMemberPointer() const {
return getPTMData().isNull();
}
-const DeclaratorDecl *nonloc::PointerToMember::getDecl() const {
+const NamedDecl *nonloc::PointerToMember::getDecl() const {
const auto PTMD = this->getPTMData();
if (PTMD.isNull())
return nullptr;
- const DeclaratorDecl *DD = nullptr;
- if (PTMD.is<const DeclaratorDecl *>())
- DD = PTMD.get<const DeclaratorDecl *>();
+ const NamedDecl *ND = nullptr;
+ if (PTMD.is<const NamedDecl *>())
+ ND = PTMD.get<const NamedDecl *>();
else
- DD = PTMD.get<const PointerToMemberData *>()->getDeclaratorDecl();
+ ND = PTMD.get<const PointerToMemberData *>()->getDeclaratorDecl();
- return DD;
+ return ND;
}
//===----------------------------------------------------------------------===//
@@ -203,14 +181,14 @@ nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
nonloc::PointerToMember::iterator nonloc::PointerToMember::begin() const {
const PTMDataType PTMD = getPTMData();
- if (PTMD.is<const DeclaratorDecl *>())
+ if (PTMD.is<const NamedDecl *>())
return {};
return PTMD.get<const PointerToMemberData *>()->begin();
}
nonloc::PointerToMember::iterator nonloc::PointerToMember::end() const {
const PTMDataType PTMD = getPTMData();
- if (PTMD.is<const DeclaratorDecl *>())
+ if (PTMD.is<const NamedDecl *>())
return {};
return PTMD.get<const PointerToMemberData *>()->end();
}
diff --git a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index 8c2e85601576..f93d04ccd61a 100644
--- a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -14,7 +14,6 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
@@ -32,8 +31,7 @@ class SarifDiagnostics : public PathDiagnosticConsumer {
const LangOptions &LO;
public:
- SarifDiagnostics(AnalyzerOptions &, const std::string &Output,
- const LangOptions &LO)
+ SarifDiagnostics(const std::string &Output, const LangOptions &LO)
: OutputFile(Output), LO(LO) {}
~SarifDiagnostics() override = default;
@@ -48,7 +46,7 @@ public:
} // end anonymous namespace
void ento::createSarifDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &Output, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
@@ -56,8 +54,9 @@ void ento::createSarifDiagnosticConsumer(
if (Output.empty())
return;
- C.push_back(new SarifDiagnostics(AnalyzerOpts, Output, PP.getLangOpts()));
- createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, Output, PP, CTU);
+ C.push_back(new SarifDiagnostics(Output, PP.getLangOpts()));
+ createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, Output, PP,
+ CTU);
}
static StringRef getFileName(const FileEntry &FE) {
@@ -160,9 +159,8 @@ static unsigned int adjustColumnPos(const SourceManager &SM, SourceLocation Loc,
assert(LocInfo.second > SM.getExpansionColumnNumber(Loc) &&
"position in file is before column number?");
- bool InvalidBuffer = false;
- const MemoryBuffer *Buf = SM.getBuffer(LocInfo.first, &InvalidBuffer);
- assert(!InvalidBuffer && "got an invalid buffer for the location's file");
+ Optional<MemoryBufferRef> Buf = SM.getBufferOrNone(LocInfo.first);
+ assert(Buf && "got an invalid buffer for the location's file");
assert(Buf->getBufferSize() >= (LocInfo.second + TokenLen) &&
"token extends past end of buffer?");
diff --git a/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 3709106ad44c..f96974f97dcc 100644
--- a/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -57,7 +57,7 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef State,
// SymIntExprs.
if (!canReasonAbout(Cond)) {
// Just add the constraint to the expression without trying to simplify.
- SymbolRef Sym = Cond.getAsSymExpr();
+ SymbolRef Sym = Cond.getAsSymbol();
assert(Sym);
return assumeSymUnsupported(State, Sym, Assumption);
}
@@ -101,7 +101,7 @@ ProgramStateRef SimpleConstraintManager::assumeInclusiveRange(
if (!canReasonAbout(Value)) {
// Just add the constraint to the expression without trying to simplify.
- SymbolRef Sym = Value.getAsSymExpr();
+ SymbolRef Sym = Value.getAsSymbol();
assert(Sym);
return assumeSymInclusiveRange(State, Sym, From, To, InRange);
}
diff --git a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 2e269f6a596e..facadaf1225f 100644
--- a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -86,7 +86,7 @@ SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
return makeLocAsInteger(LI->getLoc(), castSize);
}
- if (const SymExpr *se = val.getAsSymbolicExpression()) {
+ if (SymbolRef se = val.getAsSymbol()) {
QualType T = Context.getCanonicalType(se->getType());
// If types are the same or both are integers, ignore the cast.
// FIXME: Remove this hack when we support symbolic truncation/extension.
@@ -1106,19 +1106,28 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
}
SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
- BinaryOperator::Opcode op,
- Loc lhs, NonLoc rhs, QualType resultTy) {
+ BinaryOperator::Opcode op, Loc lhs,
+ NonLoc rhs, QualType resultTy) {
if (op >= BO_PtrMemD && op <= BO_PtrMemI) {
if (auto PTMSV = rhs.getAs<nonloc::PointerToMember>()) {
if (PTMSV->isNullMemberPointer())
return UndefinedVal();
- if (const FieldDecl *FD = PTMSV->getDeclAs<FieldDecl>()) {
+
+ auto getFieldLValue = [&](const auto *FD) -> SVal {
SVal Result = lhs;
for (const auto &I : *PTMSV)
Result = StateMgr.getStoreManager().evalDerivedToBase(
- Result, I->getType(),I->isVirtual());
+ Result, I->getType(), I->isVirtual());
+
return state->getLValue(FD, Result);
+ };
+
+ if (const auto *FD = PTMSV->getDeclAs<FieldDecl>()) {
+ return getFieldLValue(FD);
+ }
+ if (const auto *FD = PTMSV->getDeclAs<IndirectFieldDecl>()) {
+ return getFieldLValue(FD);
}
}
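
The hunk above folds two nearly identical branches into one generic lambda: the derived-to-base walk and the getLValue call are written once and instantiated for both FieldDecl and IndirectFieldDecl. A minimal standalone sketch of the same deduplication pattern, with toy types rather than the analyzer's:

#include <iostream>

struct FieldDecl { const char *Name; };
struct IndirectFieldDecl { const char *Name; };

static void demo(const FieldDecl *F, const IndirectFieldDecl *I) {
  // One generic lambda serves both unrelated pointer types; the body is
  // instantiated per argument type, so the duplicated branches collapse.
  auto describe = [](const auto *D) { std::cout << D->Name << '\n'; };
  describe(F);
  describe(I);
}

int main() {
  FieldDecl F{"field"};
  IndirectFieldDecl I{"indirect field"};
  demo(&F, &I);
}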
diff --git a/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 6ca7aec9caec..79a8eef30576 100644
--- a/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Basic/LLVM.h"
@@ -34,6 +35,12 @@ using namespace ento;
void SymExpr::anchor() {}
+StringRef SymbolConjured::getKindStr() const { return "conj_$"; }
+StringRef SymbolDerived::getKindStr() const { return "derived_$"; }
+StringRef SymbolExtent::getKindStr() const { return "extent_$"; }
+StringRef SymbolMetadata::getKindStr() const { return "meta_$"; }
+StringRef SymbolRegionValue::getKindStr() const { return "reg_$"; }
+
LLVM_DUMP_METHOD void SymExpr::dump() const { dumpToStream(llvm::errs()); }
void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS, const SymExpr *Sym) {
@@ -64,7 +71,7 @@ void SymbolCast::dumpToStream(raw_ostream &os) const {
}
void SymbolConjured::dumpToStream(raw_ostream &os) const {
- os << "conj_$" << getSymbolID() << '{' << T.getAsString() << ", LC"
+ os << getKindStr() << getSymbolID() << '{' << T.getAsString() << ", LC"
<< LCtx->getID();
if (S)
os << ", S" << S->getID(LCtx->getDecl()->getASTContext());
@@ -74,24 +81,24 @@ void SymbolConjured::dumpToStream(raw_ostream &os) const {
}
void SymbolDerived::dumpToStream(raw_ostream &os) const {
- os << "derived_$" << getSymbolID() << '{'
- << getParentSymbol() << ',' << getRegion() << '}';
+ os << getKindStr() << getSymbolID() << '{' << getParentSymbol() << ','
+ << getRegion() << '}';
}
void SymbolExtent::dumpToStream(raw_ostream &os) const {
- os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
+ os << getKindStr() << getSymbolID() << '{' << getRegion() << '}';
}
void SymbolMetadata::dumpToStream(raw_ostream &os) const {
- os << "meta_$" << getSymbolID() << '{'
- << getRegion() << ',' << T.getAsString() << '}';
+ os << getKindStr() << getSymbolID() << '{' << getRegion() << ','
+ << T.getAsString() << '}';
}
void SymbolData::anchor() {}
void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
- os << "reg_$" << getSymbolID()
- << '<' << getType().getAsString() << ' ' << R << '>';
+ os << getKindStr() << getSymbolID() << '<' << getType().getAsString() << ' '
+ << R << '>';
}
bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
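
The getKindStr() methods added above hoist each symbol class's dump prefix ("conj_$", "derived_$", and so on) into a single accessor, so the dumpToStream overrides stop repeating string literals and other consumers can reuse the prefix. A minimal sketch of the pattern with plain virtual dispatch (toy classes, not the real SymExpr hierarchy):

#include <iostream>
#include <string_view>

struct Symbol {
  virtual ~Symbol() = default;
  // Each subclass owns its prefix instead of hard-coding it at every use.
  virtual std::string_view getKindStr() const = 0;
  void dump(unsigned ID) const { std::cout << getKindStr() << ID << '\n'; }
};

struct Conjured : Symbol {
  std::string_view getKindStr() const override { return "conj_$"; }
};
struct Derived : Symbol {
  std::string_view getKindStr() const override { return "derived_$"; }
};

int main() {
  Conjured C;
  Derived D;
  C.dump(1); // prints: conj_$1
  D.dump(2); // prints: derived_$2
}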
@@ -482,7 +489,7 @@ bool SymbolReaper::isLive(SymbolRef sym) {
}
bool
-SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
+SymbolReaper::isLive(const Expr *ExprVal, const LocationContext *ELCtx) const {
if (LCtx == nullptr)
return false;
@@ -494,7 +501,8 @@ SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
return true;
}
- // If no statement is provided, everything is this and parent contexts is live.
+ // If no statement is provided, everything in this and parent contexts is
+ // live.
if (!Loc)
return true;
diff --git a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
index f4c7e5978e19..ae2bad7ee77c 100644
--- a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
@@ -34,20 +34,17 @@ namespace {
/// type to the standard error, or to complement many others. Emits detailed
/// diagnostics in textual format for the 'text' output type.
class TextDiagnostics : public PathDiagnosticConsumer {
+ PathDiagnosticConsumerOptions DiagOpts;
DiagnosticsEngine &DiagEng;
const LangOptions &LO;
- const bool IncludePath = false;
- const bool ShouldEmitAsError = false;
- const bool ApplyFixIts = false;
- const bool ShouldDisplayCheckerName = false;
+ bool ShouldDisplayPathNotes;
public:
- TextDiagnostics(DiagnosticsEngine &DiagEng, const LangOptions &LO,
- bool ShouldIncludePath, const AnalyzerOptions &AnOpts)
- : DiagEng(DiagEng), LO(LO), IncludePath(ShouldIncludePath),
- ShouldEmitAsError(AnOpts.AnalyzerWerror),
- ApplyFixIts(AnOpts.ShouldApplyFixIts),
- ShouldDisplayCheckerName(AnOpts.ShouldDisplayCheckerNameForText) {}
+ TextDiagnostics(PathDiagnosticConsumerOptions DiagOpts,
+ DiagnosticsEngine &DiagEng, const LangOptions &LO,
+ bool ShouldDisplayPathNotes)
+ : DiagOpts(std::move(DiagOpts)), DiagEng(DiagEng), LO(LO),
+ ShouldDisplayPathNotes(ShouldDisplayPathNotes) {}
~TextDiagnostics() override {}
StringRef getName() const override { return "TextDiagnostics"; }
@@ -56,13 +53,13 @@ public:
bool supportsCrossFileDiagnostics() const override { return true; }
PathGenerationScheme getGenerationScheme() const override {
- return IncludePath ? Minimal : None;
+ return ShouldDisplayPathNotes ? Minimal : None;
}
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) override {
unsigned WarnID =
- ShouldEmitAsError
+ DiagOpts.ShouldDisplayWarningsAsErrors
? DiagEng.getCustomDiagID(DiagnosticsEngine::Error, "%0")
: DiagEng.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
unsigned NoteID = DiagEng.getCustomDiagID(DiagnosticsEngine::Note, "%0");
@@ -72,7 +69,7 @@ public:
auto reportPiece = [&](unsigned ID, FullSourceLoc Loc, StringRef String,
ArrayRef<SourceRange> Ranges,
ArrayRef<FixItHint> Fixits) {
- if (!ApplyFixIts) {
+ if (!DiagOpts.ShouldApplyFixIts) {
DiagEng.Report(Loc, ID) << String << Ranges << Fixits;
return;
}
@@ -92,9 +89,10 @@ public:
E = Diags.end();
I != E; ++I) {
const PathDiagnostic *PD = *I;
- std::string WarningMsg =
- (ShouldDisplayCheckerName ? " [" + PD->getCheckerName() + "]" : "")
- .str();
+ std::string WarningMsg = (DiagOpts.ShouldDisplayDiagnosticName
+ ? " [" + PD->getCheckerName() + "]"
+ : "")
+ .str();
reportPiece(WarnID, PD->getLocation().asLocation(),
(PD->getShortDescription() + WarningMsg).str(),
@@ -110,7 +108,7 @@ public:
Piece->getFixits());
}
- if (!IncludePath)
+ if (!ShouldDisplayPathNotes)
continue;
// Then, add the path notes if necessary.
@@ -125,7 +123,7 @@ public:
}
}
- if (!ApplyFixIts || Repls.empty())
+ if (Repls.empty())
return;
Rewriter Rewrite(SM, LO);
@@ -139,18 +137,19 @@ public:
} // end anonymous namespace
void ento::createTextPathDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &Prefix, const clang::Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
- C.emplace_back(new TextDiagnostics(PP.getDiagnostics(), PP.getLangOpts(),
- /*ShouldIncludePath*/ true, AnalyzerOpts));
+ C.emplace_back(new TextDiagnostics(std::move(DiagOpts), PP.getDiagnostics(),
+ PP.getLangOpts(),
+ /*ShouldDisplayPathNotes=*/true));
}
void ento::createTextMinimalPathDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &Prefix, const clang::Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
- C.emplace_back(new TextDiagnostics(PP.getDiagnostics(), PP.getLangOpts(),
- /*ShouldIncludePath*/ false,
- AnalyzerOpts));
+ C.emplace_back(new TextDiagnostics(std::move(DiagOpts), PP.getDiagnostics(),
+ PP.getLangOpts(),
+ /*ShouldDisplayPathNotes=*/false));
}
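
This TextDiagnostics rewrite is the recurring refactor of the commit: several const bools copied out of AnalyzerOptions become one PathDiagnosticConsumerOptions value that the consumer takes by value and moves into a member. A minimal sketch of that shape, with hypothetical names standing in for the clang types:

#include <utility>

// Hypothetical stand-in for PathDiagnosticConsumerOptions.
struct ConsumerOptions {
  bool ShouldDisplayWarningsAsErrors = false;
  bool ShouldApplyFixIts = false;
};

class Consumer {
  ConsumerOptions Opts; // one struct instead of N separate const bools

public:
  // Taking the struct by value and moving it lets callers hand over the
  // options without the constructor signature growing per flag.
  explicit Consumer(ConsumerOptions Opts) : Opts(std::move(Opts)) {}

  bool emitAsError() const { return Opts.ShouldDisplayWarningsAsErrors; }
};

int main() {
  ConsumerOptions O;
  O.ShouldDisplayWarningsAsErrors = true;
  Consumer C(std::move(O));
  return C.emitAsError() ? 0 : 1;
}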
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 392049e21c6e..f2a19b2ccc90 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -150,7 +150,7 @@ public:
break;
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
case PD_##NAME: \
- CREATEFN(*Opts.get(), PathConsumers, OutDir, PP, CTU); \
+ CREATEFN(Opts->getDiagOpts(), PathConsumers, OutDir, PP, CTU); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
default:
@@ -476,7 +476,7 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
static bool isBisonFile(ASTContext &C) {
const SourceManager &SM = C.getSourceManager();
FileID FID = SM.getMainFileID();
- StringRef Buffer = SM.getBuffer(FID)->getBuffer();
+ StringRef Buffer = SM.getBufferOrFake(FID).getBuffer();
if (Buffer.startswith("/* A Bison parser, made by"))
return true;
return false;
diff --git a/clang/lib/Tooling/AllTUsExecution.cpp b/clang/lib/Tooling/AllTUsExecution.cpp
index 7707c99c21d0..5565da9b548a 100644
--- a/clang/lib/Tooling/AllTUsExecution.cpp
+++ b/clang/lib/Tooling/AllTUsExecution.cpp
@@ -124,7 +124,7 @@ llvm::Error AllTUsToolExecutor::execute(
  // Each thread gets an independent copy of a VFS to allow different
// concurrent working directories.
IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS =
- llvm::vfs::createPhysicalFileSystem().release();
+ llvm::vfs::createPhysicalFileSystem();
ClangTool Tool(Compilations, {Path},
std::make_shared<PCHContainerOperations>(), FS);
Tool.appendArgumentsAdjuster(Action.second);
diff --git a/clang/lib/Tooling/ArgumentsAdjusters.cpp b/clang/lib/Tooling/ArgumentsAdjusters.cpp
index a857b57fbf7b..bcfb5b39a077 100644
--- a/clang/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/clang/lib/Tooling/ArgumentsAdjusters.cpp
@@ -21,6 +21,16 @@
namespace clang {
namespace tooling {
+static StringRef getDriverMode(const CommandLineArguments &Args) {
+ for (const auto &Arg : Args) {
+ StringRef ArgRef = Arg;
+ if (ArgRef.consume_front("--driver-mode=")) {
+ return ArgRef;
+ }
+ }
+ return StringRef();
+}
+
/// Add the -fsyntax-only option and drop options that trigger output generation.
ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
@@ -93,20 +103,28 @@ ArgumentsAdjuster getClangStripSerializeDiagnosticAdjuster() {
ArgumentsAdjuster getClangStripDependencyFileAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
+ auto UsingClDriver = (getDriverMode(Args) == "cl");
+
CommandLineArguments AdjustedArgs;
for (size_t i = 0, e = Args.size(); i < e; ++i) {
StringRef Arg = Args[i];
- // All dependency-file options begin with -M. These include -MM,
- // -MF, -MG, -MP, -MT, -MQ, -MD, and -MMD.
- if (!Arg.startswith("-M") && !Arg.startswith("/showIncludes") &&
- !Arg.startswith("-showIncludes")) {
- AdjustedArgs.push_back(Args[i]);
+
+ // These flags take an argument: -MX foo. Skip the next argument also.
+ if (!UsingClDriver && (Arg == "-MF" || Arg == "-MT" || Arg == "-MQ")) {
+ ++i;
continue;
}
+ // When not using the cl driver mode, dependency file generation options
+ // begin with -M. These include -MM, -MF, -MG, -MP, -MT, -MQ, -MD, and
+ // -MMD.
+ if (!UsingClDriver && Arg.startswith("-M"))
+ continue;
+ // Under MSVC's cl driver mode, dependency file generation is controlled
+      // using /showIncludes.
+ if (Arg.startswith("/showIncludes") || Arg.startswith("-showIncludes"))
+ continue;
- if (Arg == "-MF" || Arg == "-MT" || Arg == "-MQ")
- // These flags take an argument: -MX foo. Skip the next argument also.
- ++i;
+ AdjustedArgs.push_back(Args[i]);
}
return AdjustedArgs;
};
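
The adjuster now branches on driver mode: under the gcc-style driver every dependency-file option starts with -M (and -MF/-MT/-MQ consume a following argument), while under cl mode only /showIncludes matters. A standalone re-implementation of the filtering loop for illustration; this is not the clang::tooling entry point:

#include <iostream>
#include <string>
#include <string_view>
#include <vector>

static std::vector<std::string>
stripDepFlags(const std::vector<std::string> &Args, bool UsingClDriver) {
  std::vector<std::string> Out;
  for (size_t I = 0, E = Args.size(); I < E; ++I) {
    std::string_view Arg = Args[I];
    // These flags take a separate argument; skip the next argument too.
    if (!UsingClDriver && (Arg == "-MF" || Arg == "-MT" || Arg == "-MQ")) {
      ++I;
      continue;
    }
    if (!UsingClDriver && Arg.substr(0, 2) == "-M")
      continue; // -MM, -MG, -MP, -MD, -MMD, ...
    if (Arg.substr(0, 13) == "/showIncludes" ||
        Arg.substr(0, 13) == "-showIncludes")
      continue;
    Out.push_back(Args[I]);
  }
  return Out;
}

int main() {
  for (const auto &A :
       stripDepFlags({"clang", "-MD", "-MF", "dep.d", "-c", "a.c"}, false))
    std::cout << A << ' '; // prints: clang -c a.c
  std::cout << '\n';
}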
diff --git a/clang/lib/Tooling/CompilationDatabase.cpp b/clang/lib/Tooling/CompilationDatabase.cpp
index 2b4c26dab96f..1e19e68633d2 100644
--- a/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/clang/lib/Tooling/CompilationDatabase.cpp
@@ -199,22 +199,6 @@ public:
SmallVector<std::string, 2> UnusedInputs;
};
-// Unary functor for asking "Given a StringRef S1, does there exist a string
-// S2 in Arr where S1 == S2?"
-struct MatchesAny {
- MatchesAny(ArrayRef<std::string> Arr) : Arr(Arr) {}
-
- bool operator() (StringRef S) {
- for (const std::string *I = Arr.begin(), *E = Arr.end(); I != E; ++I)
- if (*I == S)
- return true;
- return false;
- }
-
-private:
- ArrayRef<std::string> Arr;
-};
-
// Filters out flags unused by tools, such as -no-integrated-as and -Wa,*.
// They are not used for syntax checking, and could confuse targets
// which don't support these options.
@@ -292,8 +276,7 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
// up with no jobs but then this is the user's fault.
Args.push_back("placeholder.cpp");
- Args.erase(std::remove_if(Args.begin(), Args.end(), FilterUnusedFlags()),
- Args.end());
+ llvm::erase_if(Args, FilterUnusedFlags());
const std::unique_ptr<driver::Compilation> Compilation(
NewDriver->BuildCompilation(Args));
@@ -320,15 +303,14 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
return false;
}
- // Remove all compilation input files from the command line. This is
- // necessary so that getCompileCommands() can construct a command line for
- // each file.
- std::vector<const char *>::iterator End = std::remove_if(
- Args.begin(), Args.end(), MatchesAny(CompileAnalyzer.Inputs));
-
- // Remove all inputs deemed unused for compilation.
- End = std::remove_if(Args.begin(), End, MatchesAny(DiagClient.UnusedInputs));
-
+ // Remove all compilation input files from the command line and inputs deemed
+ // unused for compilation. This is necessary so that getCompileCommands() can
+ // construct a command line for each file.
+ std::vector<const char *>::iterator End =
+ llvm::remove_if(Args, [&](StringRef S) {
+ return llvm::is_contained(CompileAnalyzer.Inputs, S) ||
+ llvm::is_contained(DiagClient.UnusedInputs, S);
+ });
// Remove the -c add above as well. It will be at the end right now.
assert(strcmp(*(End - 1), "-c") == 0);
--End;
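
llvm::erase_if packages up the erase-remove idiom that the old code spelled out by hand; C++20 later standardized the same helper as std::erase_if. The idiom in plain standard C++:

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> V{1, 2, 3, 4, 5, 6};

  // remove_if only compacts the kept elements to the front; the erase
  // call actually shrinks the container. erase_if does both in one step.
  V.erase(std::remove_if(V.begin(), V.end(),
                         [](int N) { return N % 2 == 0; }),
          V.end());

  for (int N : V)
    std::cout << N << ' '; // prints: 1 3 5
  std::cout << '\n';
}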
@@ -341,7 +323,7 @@ std::unique_ptr<FixedCompilationDatabase>
FixedCompilationDatabase::loadFromCommandLine(int &Argc,
const char *const *Argv,
std::string &ErrorMsg,
- Twine Directory) {
+ const Twine &Directory) {
ErrorMsg.clear();
if (Argc == 0)
return nullptr;
@@ -366,20 +348,28 @@ FixedCompilationDatabase::loadFromFile(StringRef Path, std::string &ErrorMsg) {
ErrorMsg = "Error while opening fixed database: " + Result.message();
return nullptr;
}
+ return loadFromBuffer(llvm::sys::path::parent_path(Path),
+ (*File)->getBuffer(), ErrorMsg);
+}
+
+std::unique_ptr<FixedCompilationDatabase>
+FixedCompilationDatabase::loadFromBuffer(StringRef Directory, StringRef Data,
+ std::string &ErrorMsg) {
+ ErrorMsg.clear();
std::vector<std::string> Args;
- for (llvm::StringRef Line :
- llvm::make_range(llvm::line_iterator(**File), llvm::line_iterator())) {
+ StringRef Line;
+ while (!Data.empty()) {
+ std::tie(Line, Data) = Data.split('\n');
// Stray whitespace is almost certainly unintended.
Line = Line.trim();
if (!Line.empty())
Args.push_back(Line.str());
}
- return std::make_unique<FixedCompilationDatabase>(
- llvm::sys::path::parent_path(Path), std::move(Args));
+ return std::make_unique<FixedCompilationDatabase>(Directory, std::move(Args));
}
-FixedCompilationDatabase::
-FixedCompilationDatabase(Twine Directory, ArrayRef<std::string> CommandLine) {
+FixedCompilationDatabase::FixedCompilationDatabase(
+ const Twine &Directory, ArrayRef<std::string> CommandLine) {
std::vector<std::string> ToolCommandLine(1, GetClangToolCommand());
ToolCommandLine.insert(ToolCommandLine.end(),
CommandLine.begin(), CommandLine.end());
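
The new loadFromBuffer parses the fixed database with a plain split-and-trim loop over the buffer. The same loop in standard C++ with std::string_view, for illustration only:

#include <iostream>
#include <string>
#include <string_view>
#include <vector>

static std::vector<std::string> splitArgs(std::string_view Data) {
  std::vector<std::string> Args;
  while (!Data.empty()) {
    size_t NL = Data.find('\n');
    std::string_view Line = Data.substr(0, NL);
    Data = (NL == std::string_view::npos) ? std::string_view()
                                          : Data.substr(NL + 1);
    // Stray whitespace is almost certainly unintended; trim both ends.
    while (!Line.empty() && (Line.front() == ' ' || Line.front() == '\t'))
      Line.remove_prefix(1);
    while (!Line.empty() && (Line.back() == ' ' || Line.back() == '\t'))
      Line.remove_suffix(1);
    if (!Line.empty())
      Args.emplace_back(Line);
  }
  return Args;
}

int main() {
  for (const auto &A : splitArgs("-std=c++17\n  -Wall  \n\n-Iinclude\n"))
    std::cout << '[' << A << "]\n"; // [-std=c++17] [-Wall] [-Iinclude]
}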
diff --git a/clang/lib/Tooling/Core/Replacement.cpp b/clang/lib/Tooling/Core/Replacement.cpp
index ab8e20539559..30e1923bf1cb 100644
--- a/clang/lib/Tooling/Core/Replacement.cpp
+++ b/clang/lib/Tooling/Core/Replacement.cpp
@@ -591,7 +591,7 @@ llvm::Expected<std::string> applyAllReplacements(StringRef Code,
Rewriter Rewrite(SourceMgr, LangOptions());
InMemoryFileSystem->addFile(
"<stdin>", 0, llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>"));
- FileID ID = SourceMgr.createFileID(*Files.getFile("<stdin>"),
+ FileID ID = SourceMgr.createFileID(*Files.getOptionalFileRef("<stdin>"),
SourceLocation(),
clang::SrcMgr::C_User);
for (auto I = Replaces.rbegin(), E = Replaces.rend(); I != E; ++I) {
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index b1b87e7fa573..1c10b7d727a5 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -217,9 +217,11 @@ public:
llvm::vfs::Status Stat)
: Buffer(std::move(Buffer)), Stat(std::move(Stat)) {}
- llvm::ErrorOr<llvm::vfs::Status> status() override { return Stat; }
+ static llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
+ create(const CachedFileSystemEntry *Entry,
+ ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings);
- const llvm::MemoryBuffer *getBufferPtr() const { return Buffer.get(); }
+ llvm::ErrorOr<llvm::vfs::Status> status() override { return Stat; }
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
@@ -234,9 +236,11 @@ private:
llvm::vfs::Status Stat;
};
-llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
-createFile(const CachedFileSystemEntry *Entry,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings) {
+} // end anonymous namespace
+
+llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> MinimizedVFSFile::create(
+ const CachedFileSystemEntry *Entry,
+ ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings) {
if (Entry->isDirectory())
return llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>(
std::make_error_code(std::errc::is_a_directory));
@@ -248,14 +252,12 @@ createFile(const CachedFileSystemEntry *Entry,
/*RequiresNullTerminator=*/false),
*Entry->getStatus());
if (!Entry->getPPSkippedRangeMapping().empty() && PPSkipMappings)
- (*PPSkipMappings)[Result->getBufferPtr()] =
+ (*PPSkipMappings)[Result->Buffer->getBufferStart()] =
&Entry->getPPSkippedRangeMapping();
return llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>(
std::unique_ptr<llvm::vfs::File>(std::move(Result)));
}
-} // end anonymous namespace
-
llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
DependencyScanningWorkerFilesystem::openFileForRead(const Twine &Path) {
SmallString<256> OwnedFilename;
@@ -265,5 +267,5 @@ DependencyScanningWorkerFilesystem::openFileForRead(const Twine &Path) {
getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
- return createFile(Result.get(), PPSkipMappings);
+ return MinimizedVFSFile::create(Result.get(), PPSkipMappings);
}
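
Promoting the free createFile function into the static MinimizedVFSFile::create factory lets it reach the class's private Buffer member directly, which is what allows dropping the getBufferPtr() accessor; the definition can still live outside the anonymous namespace. The shape of that refactor with toy types:

#include <memory>
#include <string>

class File {
  std::string Buffer; // private: only members and friends may touch it

  explicit File(std::string B) : Buffer(std::move(B)) {}

public:
  // A static factory declared in-class may use the private constructor
  // and private fields directly; no accessor is needed.
  static std::unique_ptr<File> create(std::string Contents);

  size_t size() const { return Buffer.size(); }
};

std::unique_ptr<File> File::create(std::string Contents) {
  auto Result = std::unique_ptr<File>(new File(std::move(Contents)));
  Result->Buffer.shrink_to_fit(); // direct private access from the factory
  return Result;
}

int main() { return File::create("hello")->size() == 5 ? 0 : 1; }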
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 32bbc578d2db..63264b0dda2d 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -44,28 +44,6 @@ private:
DependencyConsumer &C;
};
-/// A proxy file system that doesn't call `chdir` when changing the working
-/// directory of a clang tool.
-class ProxyFileSystemWithoutChdir : public llvm::vfs::ProxyFileSystem {
-public:
- ProxyFileSystemWithoutChdir(
- llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
- : ProxyFileSystem(std::move(FS)) {}
-
- llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
- assert(!CWD.empty() && "empty CWD");
- return CWD;
- }
-
- std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
- CWD = Path.str();
- return {};
- }
-
-private:
- std::string CWD;
-};
-
/// A clang tool that runs the preprocessor in a mode that's optimized for
/// dependency scanning for the given compiler invocation.
class DependencyScanningAction : public tooling::ToolAction {
@@ -176,7 +154,7 @@ DependencyScanningWorker::DependencyScanningWorker(
: Format(Service.getFormat()) {
DiagOpts = new DiagnosticOptions();
PCHContainerOps = std::make_shared<PCHContainerOperations>();
- RealFS = new ProxyFileSystemWithoutChdir(llvm::vfs::getRealFileSystem());
+ RealFS = llvm::vfs::createPhysicalFileSystem();
if (Service.canSkipExcludedPPRanges())
PPSkipMappings =
std::make_unique<ExcludedPreprocessorDirectiveSkipMapping>();
diff --git a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index 4f6eff799f22..f74ce7304df5 100644
--- a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -77,15 +77,10 @@ void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
// Dependency generation really does want to go all the way to the
// file entry for a source location to find out what is depended on.
// We do not want #line markers to affect dependency generation!
- Optional<FileEntryRef> File =
- SM.getFileEntryRefForID(SM.getFileID(SM.getExpansionLoc(Loc)));
- if (!File)
- return;
-
- StringRef FileName =
- llvm::sys::path::remove_leading_dotslash(File->getName());
-
- MDC.MainDeps.push_back(std::string(FileName));
+ if (Optional<StringRef> Filename =
+ SM.getNonBuiltinFilenameForID(SM.getFileID(SM.getExpansionLoc(Loc))))
+ MDC.MainDeps.push_back(
+ std::string(llvm::sys::path::remove_leading_dotslash(*Filename)));
}
void ModuleDepCollectorPP::InclusionDirective(
diff --git a/clang/lib/Tooling/FileMatchTrie.cpp b/clang/lib/Tooling/FileMatchTrie.cpp
index 88dea6bb6c9f..3b02405da2f2 100644
--- a/clang/lib/Tooling/FileMatchTrie.cpp
+++ b/clang/lib/Tooling/FileMatchTrie.cpp
@@ -105,8 +105,13 @@ public:
StringRef FileName,
bool &IsAmbiguous,
unsigned ConsumedLength = 0) const {
+ // Note: we support only directory symlinks for performance reasons.
if (Children.empty()) {
- if (Comparator.equivalent(StringRef(Path), FileName))
+      // Since we do not support file symlinks, compare
+      // basenames here to avoid a request to the file system.
+ if (llvm::sys::path::filename(Path) ==
+ llvm::sys::path::filename(FileName) &&
+ Comparator.equivalent(StringRef(Path), FileName))
return StringRef(Path);
return {};
}
@@ -121,6 +126,13 @@ public:
if (!Result.empty() || IsAmbiguous)
return Result;
}
+
+ // If `ConsumedLength` is zero, this is the root and we have no filename
+    // match. Give up in this case; we don't try to find symlinks with
+ // different names.
+ if (ConsumedLength == 0)
+ return {};
+
std::vector<StringRef> AllChildren;
getAll(AllChildren, MatchingChild);
StringRef Result;
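
The change above puts a cheap guard in front of an expensive check: equivalent() hits the filesystem, so it now runs only when the basenames already agree, which is sound as long as file symlinks are out of scope. The guard pattern in isolation, with std::filesystem standing in for the real comparator:

#include <filesystem>
#include <iostream>
#include <string>
#include <system_error>

// Stand-in for the expensive stat-based comparison.
static bool equivalentOnDisk(const std::string &A, const std::string &B) {
  std::error_code EC;
  return std::filesystem::equivalent(A, B, EC);
}

static bool matches(const std::string &Path, const std::string &FileName) {
  // Without file symlinks, paths with different basenames can never be
  // equivalent, so the filesystem request can be skipped outright.
  if (std::filesystem::path(Path).filename() !=
      std::filesystem::path(FileName).filename())
    return false;
  return equivalentOnDisk(Path, FileName);
}

int main() {
  // Different basenames: answered without touching the filesystem.
  std::cout << matches("/tmp/a.cpp", "/tmp/b.cpp") << '\n'; // prints: 0
}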
diff --git a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index 681fcc5c762a..d1f984632660 100644
--- a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -12,6 +12,7 @@
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Path.h"
namespace clang {
namespace tooling {
@@ -42,7 +43,7 @@ unsigned getOffsetAfterTokenSequence(
GetOffsetAfterSequence) {
SourceManagerForFile VirtualSM(FileName, Code);
SourceManager &SM = VirtualSM.get();
- Lexer Lex(SM.getMainFileID(), SM.getBuffer(SM.getMainFileID()), SM,
+ Lexer Lex(SM.getMainFileID(), SM.getBufferOrFake(SM.getMainFileID()), SM,
createLangOpts());
Token Tok;
// Get the first token.
@@ -100,7 +101,8 @@ unsigned getOffsetAfterHeaderGuardsAndComments(StringRef FileName,
[](const SourceManager &SM, Lexer &Lex, Token Tok) -> unsigned {
if (checkAndConsumeDirectiveWithName(Lex, "ifndef", Tok)) {
skipComments(Lex, Tok);
- if (checkAndConsumeDirectiveWithName(Lex, "define", Tok))
+ if (checkAndConsumeDirectiveWithName(Lex, "define", Tok) &&
+ Tok.isAtStartOfLine())
return SM.getFileOffset(Tok.getLocation());
}
return 0;
@@ -173,14 +175,26 @@ inline StringRef trimInclude(StringRef IncludeName) {
const char IncludeRegexPattern[] =
R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))";
+// The filename of Path excluding extension.
+// Used to match implementation files with headers; this differs from sys::path::stem:
+// - in names with multiple dots (foo.cu.cc) it terminates at the *first* dot
+// - an empty stem is never returned: /foo/.bar.x => .bar
+// - we don't bother to handle . and .. specially
+StringRef matchingStem(llvm::StringRef Path) {
+ StringRef Name = llvm::sys::path::filename(Path);
+ return Name.substr(0, Name.find('.', 1));
+}
+
} // anonymous namespace
IncludeCategoryManager::IncludeCategoryManager(const IncludeStyle &Style,
StringRef FileName)
: Style(Style), FileName(FileName) {
- FileStem = llvm::sys::path::stem(FileName);
- for (const auto &Category : Style.IncludeCategories)
- CategoryRegexs.emplace_back(Category.Regex, llvm::Regex::IgnoreCase);
+ for (const auto &Category : Style.IncludeCategories) {
+ CategoryRegexs.emplace_back(Category.Regex, Category.RegexIsCaseSensitive
+ ? llvm::Regex::NoFlags
+ : llvm::Regex::IgnoreCase);
+ }
IsMainFile = FileName.endswith(".c") || FileName.endswith(".cc") ||
FileName.endswith(".cpp") || FileName.endswith(".c++") ||
FileName.endswith(".cxx") || FileName.endswith(".m") ||
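
matchingStem cuts at the first dot past position 0, unlike sys::path::stem which cuts at the last, and searching from index 1 is what keeps dotfiles from producing an empty stem. A standalone version with the documented cases as assertions; this is an illustration, not the library symbol:

#include <cassert>
#include <string_view>

static std::string_view filename(std::string_view Path) {
  size_t Slash = Path.find_last_of('/');
  return Slash == std::string_view::npos ? Path : Path.substr(Slash + 1);
}

// First-dot stem: the search starts at index 1, so a leading dot survives
// and an empty stem is never produced.
static std::string_view matchingStem(std::string_view Path) {
  std::string_view Name = filename(Path);
  return Name.substr(0, Name.find('.', 1));
}

int main() {
  assert(matchingStem("foo.cu.cc") == "foo");    // stops at the first dot
  assert(matchingStem("/foo/.bar.x") == ".bar"); // never an empty stem
  assert(matchingStem("plain") == "plain");
}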
@@ -221,13 +235,31 @@ int IncludeCategoryManager::getSortIncludePriority(StringRef IncludeName,
bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
if (!IncludeName.startswith("\""))
return false;
- StringRef HeaderStem =
- llvm::sys::path::stem(IncludeName.drop_front(1).drop_back(1));
- if (FileStem.startswith(HeaderStem) ||
- FileStem.startswith_lower(HeaderStem)) {
+
+ IncludeName =
+ IncludeName.drop_front(1).drop_back(1); // remove the surrounding "" or <>
+ // Not matchingStem: implementation files may have compound extensions but
+ // headers may not.
+ StringRef HeaderStem = llvm::sys::path::stem(IncludeName);
+ StringRef FileStem = llvm::sys::path::stem(FileName); // foo.cu for foo.cu.cc
+ StringRef MatchingFileStem = matchingStem(FileName); // foo for foo.cu.cc
+ // main-header examples:
+ // 1) foo.h => foo.cc
+ // 2) foo.h => foo.cu.cc
+ // 3) foo.proto.h => foo.proto.cc
+ //
+ // non-main-header examples:
+ // 1) foo.h => bar.cc
+ // 2) foo.proto.h => foo.cc
+ StringRef Matching;
+ if (MatchingFileStem.startswith_lower(HeaderStem))
+ Matching = MatchingFileStem; // example 1), 2)
+ else if (FileStem.equals_lower(HeaderStem))
+ Matching = FileStem; // example 3)
+ if (!Matching.empty()) {
llvm::Regex MainIncludeRegex(HeaderStem.str() + Style.IncludeIsMainRegex,
llvm::Regex::IgnoreCase);
- if (MainIncludeRegex.match(FileStem))
+ if (MainIncludeRegex.match(Matching))
return true;
}
return false;
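
The rewritten isMainHeader tries two stems in order: the first-dot stem of the implementation file against the header stem (covering foo.h against foo.cc and foo.cu.cc), then an exact stem match (covering foo.proto.h against foo.proto.cc). The decision reduced to a sketch over bare filenames, leaving out the case-folding and the IncludeIsMainRegex step:

#include <cassert>
#include <string_view>

static std::string_view stem(std::string_view Name) { // last-dot stem
  size_t Dot = Name.rfind('.');
  return Dot == std::string_view::npos ? Name : Name.substr(0, Dot);
}
static std::string_view matchingStem(std::string_view Name) { // first-dot
  return Name.substr(0, Name.find('.', 1));
}

static bool isMainHeader(std::string_view Header, std::string_view File) {
  std::string_view HeaderStem = stem(Header);
  // Prefix match on the first-dot stem: foo.h -> foo.cc, foo.h -> foo.cu.cc.
  if (matchingStem(File).substr(0, HeaderStem.size()) == HeaderStem)
    return true;
  // Exact match on the last-dot stem: foo.proto.h -> foo.proto.cc.
  return stem(File) == HeaderStem;
}

int main() {
  assert(isMainHeader("foo.h", "foo.cc"));
  assert(isMainHeader("foo.h", "foo.cu.cc"));
  assert(isMainHeader("foo.proto.h", "foo.proto.cc"));
  assert(!isMainHeader("foo.h", "bar.cc"));
  assert(!isMainHeader("foo.proto.h", "foo.cc"));
}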
diff --git a/clang/lib/Tooling/Inclusions/IncludeStyle.cpp b/clang/lib/Tooling/Inclusions/IncludeStyle.cpp
index 26dc0b87cf9d..da5bb00d1013 100644
--- a/clang/lib/Tooling/Inclusions/IncludeStyle.cpp
+++ b/clang/lib/Tooling/Inclusions/IncludeStyle.cpp
@@ -18,6 +18,7 @@ void MappingTraits<IncludeStyle::IncludeCategory>::mapping(
IO.mapOptional("Regex", Category.Regex);
IO.mapOptional("Priority", Category.Priority);
IO.mapOptional("SortPriority", Category.SortPriority);
+ IO.mapOptional("CaseSensitive", Category.RegexIsCaseSensitive);
}
void ScalarEnumerationTraits<IncludeStyle::IncludeBlocksStyle>::enumeration(
diff --git a/clang/lib/Tooling/JSONCompilationDatabase.cpp b/clang/lib/Tooling/JSONCompilationDatabase.cpp
index 4af361f538cb..2d8847a7a327 100644
--- a/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -217,7 +217,7 @@ JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
std::string &ErrorMessage,
JSONCommandLineSyntax Syntax) {
std::unique_ptr<llvm::MemoryBuffer> DatabaseBuffer(
- llvm::MemoryBuffer::getMemBuffer(DatabaseString));
+ llvm::MemoryBuffer::getMemBufferCopy(DatabaseString));
std::unique_ptr<JSONCompilationDatabase> Database(
new JSONCompilationDatabase(std::move(DatabaseBuffer), Syntax));
if (!Database->parse(ErrorMessage))
@@ -272,7 +272,8 @@ static bool unwrapCommand(std::vector<std::string> &Args) {
return false;
StringRef Wrapper =
stripExecutableExtension(llvm::sys::path::filename(Args.front()));
- if (Wrapper == "distcc" || Wrapper == "gomacc" || Wrapper == "ccache") {
+ if (Wrapper == "distcc" || Wrapper == "gomacc" || Wrapper == "ccache" ||
+ Wrapper == "sccache") {
// Most of these wrappers support being invoked 3 ways:
// `distcc g++ file.c` This is the mode we're trying to match.
// We need to drop `distcc`.
@@ -369,16 +370,11 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
}
auto *ValueString = dyn_cast<llvm::yaml::ScalarNode>(Value);
auto *SequenceString = dyn_cast<llvm::yaml::SequenceNode>(Value);
- if (KeyValue == "arguments" && !SequenceString) {
- ErrorMessage = "Expected sequence as value.";
- return false;
- } else if (KeyValue != "arguments" && !ValueString) {
- ErrorMessage = "Expected string as value.";
- return false;
- }
- if (KeyValue == "directory") {
- Directory = ValueString;
- } else if (KeyValue == "arguments") {
+ if (KeyValue == "arguments") {
+ if (!SequenceString) {
+ ErrorMessage = "Expected sequence as value.";
+ return false;
+ }
Command = std::vector<llvm::yaml::ScalarNode *>();
for (auto &Argument : *SequenceString) {
auto *Scalar = dyn_cast<llvm::yaml::ScalarNode>(&Argument);
@@ -388,17 +384,25 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
}
Command->push_back(Scalar);
}
- } else if (KeyValue == "command") {
- if (!Command)
- Command = std::vector<llvm::yaml::ScalarNode *>(1, ValueString);
- } else if (KeyValue == "file") {
- File = ValueString;
- } else if (KeyValue == "output") {
- Output = ValueString;
} else {
- ErrorMessage = ("Unknown key: \"" +
- KeyString->getRawValue() + "\"").str();
- return false;
+ if (!ValueString) {
+ ErrorMessage = "Expected string as value.";
+ return false;
+ }
+ if (KeyValue == "directory") {
+ Directory = ValueString;
+ } else if (KeyValue == "command") {
+ if (!Command)
+ Command = std::vector<llvm::yaml::ScalarNode *>(1, ValueString);
+ } else if (KeyValue == "file") {
+ File = ValueString;
+ } else if (KeyValue == "output") {
+ Output = ValueString;
+ } else {
+ ErrorMessage =
+ ("Unknown key: \"" + KeyString->getRawValue() + "\"").str();
+ return false;
+ }
}
}
if (!File) {
diff --git a/clang/lib/Tooling/Refactoring/ASTSelection.cpp b/clang/lib/Tooling/Refactoring/ASTSelection.cpp
index af1eb491a20a..9485c8bc04ad 100644
--- a/clang/lib/Tooling/Refactoring/ASTSelection.cpp
+++ b/clang/lib/Tooling/Refactoring/ASTSelection.cpp
@@ -218,7 +218,7 @@ static void dump(const SelectedASTNode &Node, llvm::raw_ostream &OS,
if (const Decl *D = Node.Node.get<Decl>()) {
OS << D->getDeclKindName() << "Decl";
if (const auto *ND = dyn_cast<NamedDecl>(D))
- OS << " \"" << ND->getNameAsString() << '"';
+ OS << " \"" << ND->getDeclName() << '"';
} else if (const Stmt *S = Node.Node.get<Stmt>()) {
OS << S->getStmtClassName();
}
diff --git a/clang/lib/Tooling/Core/Lookup.cpp b/clang/lib/Tooling/Refactoring/Lookup.cpp
index 712724a268fb..9468d4d032a7 100644
--- a/clang/lib/Tooling/Core/Lookup.cpp
+++ b/clang/lib/Tooling/Refactoring/Lookup.cpp
@@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Tooling/Core/Lookup.h"
+#include "clang/Tooling/Refactoring/Lookup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
diff --git a/clang/lib/Tooling/Refactoring/RefactoringActions.cpp b/clang/lib/Tooling/Refactoring/RefactoringActions.cpp
index 7ac723f67c04..bf98941f568b 100644
--- a/clang/lib/Tooling/Refactoring/RefactoringActions.cpp
+++ b/clang/lib/Tooling/Refactoring/RefactoringActions.cpp
@@ -18,8 +18,8 @@ namespace {
class DeclNameOption final : public OptionalRefactoringOption<std::string> {
public:
- StringRef getName() const { return "name"; }
- StringRef getDescription() const {
+ StringRef getName() const override { return "name"; }
+ StringRef getDescription() const override {
return "Name of the extracted declaration";
}
};
diff --git a/clang/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp b/clang/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp
index 9e69d37e81ad..762864c953d8 100644
--- a/clang/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp
@@ -21,13 +21,12 @@ SymbolOccurrence::SymbolOccurrence(const SymbolName &Name, OccurrenceKind Kind,
"mismatching number of locations and lengths");
assert(!Locations.empty() && "no locations");
if (Locations.size() == 1) {
- RangeOrNumRanges = SourceRange(
+ new (&SingleRange) SourceRange(
Locations[0], Locations[0].getLocWithOffset(NamePieces[0].size()));
return;
}
MultipleRanges = std::make_unique<SourceRange[]>(Locations.size());
- RangeOrNumRanges.setBegin(
- SourceLocation::getFromRawEncoding(Locations.size()));
+ NumRanges = Locations.size();
for (const auto &Loc : llvm::enumerate(Locations)) {
MultipleRanges[Loc.index()] = SourceRange(
Loc.value(),
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp b/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
index 23f567f1c9ec..ac8ad344623c 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
@@ -133,13 +133,13 @@ const NamedDecl *getNamedDeclFor(const ASTContext &Context,
}
std::string getUSRForDecl(const Decl *Decl) {
- llvm::SmallVector<char, 128> Buff;
+ llvm::SmallString<128> Buff;
// FIXME: Add test for the nullptr case.
if (Decl == nullptr || index::generateUSRForDecl(Decl, Buff))
return "";
- return std::string(Buff.data(), Buff.size());
+ return std::string(Buff);
}
} // end namespace tooling
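
llvm::SmallString is SmallVector<char> plus string conveniences, including conversions that make the single-argument std::string construction above work, where the old code had to pass data() and size() separately. A short LLVM-flavored illustration (this one needs the LLVM headers to build):

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

int main() {
  llvm::SmallString<128> Buff; // stack storage until it outgrows 128 bytes
  Buff.append("c:@F@");
  Buff.append("main");
  // SmallString converts to std::string/StringRef directly, replacing the
  // std::string(Buff.data(), Buff.size()) spelling.
  std::string S = std::string(Buff);
  llvm::outs() << S << '\n'; // prints: c:@F@main
  return 0;
}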
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp b/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
index 43dc32e158d3..a69b76a3c971 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
@@ -80,6 +80,22 @@ public:
} else if (const auto *TemplateDecl =
dyn_cast<ClassTemplateDecl>(FoundDecl)) {
handleClassTemplateDecl(TemplateDecl);
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(FoundDecl)) {
+ USRSet.insert(getUSRForDecl(FD));
+ if (const auto *FTD = FD->getPrimaryTemplate())
+ handleFunctionTemplateDecl(FTD);
+ } else if (const auto *FD = dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
+ handleFunctionTemplateDecl(FD);
+ } else if (const auto *VTD = dyn_cast<VarTemplateDecl>(FoundDecl)) {
+ handleVarTemplateDecl(VTD);
+ } else if (const auto *VD =
+ dyn_cast<VarTemplateSpecializationDecl>(FoundDecl)) {
+ // FIXME: figure out why FoundDecl can be a VarTemplateSpecializationDecl.
+ handleVarTemplateDecl(VD->getSpecializedTemplate());
+ } else if (const auto *VD = dyn_cast<VarDecl>(FoundDecl)) {
+ USRSet.insert(getUSRForDecl(VD));
+ if (const auto *VTD = VD->getDescribedVarTemplate())
+ handleVarTemplateDecl(VTD);
} else {
USRSet.insert(getUSRForDecl(FoundDecl));
}
@@ -96,12 +112,6 @@ public:
return true;
}
- bool VisitClassTemplatePartialSpecializationDecl(
- const ClassTemplatePartialSpecializationDecl *PartialSpec) {
- PartialSpecs.push_back(PartialSpec);
- return true;
- }
-
private:
void handleCXXRecordDecl(const CXXRecordDecl *RecordDecl) {
if (!RecordDecl->getDefinition()) {
@@ -118,14 +128,33 @@ private:
void handleClassTemplateDecl(const ClassTemplateDecl *TemplateDecl) {
for (const auto *Specialization : TemplateDecl->specializations())
addUSRsOfCtorDtors(Specialization);
-
- for (const auto *PartialSpec : PartialSpecs) {
- if (PartialSpec->getSpecializedTemplate() == TemplateDecl)
- addUSRsOfCtorDtors(PartialSpec);
- }
+ SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ TemplateDecl->getPartialSpecializations(PartialSpecs);
+ for (const auto *Spec : PartialSpecs)
+ addUSRsOfCtorDtors(Spec);
addUSRsOfCtorDtors(TemplateDecl->getTemplatedDecl());
}
+ void handleFunctionTemplateDecl(const FunctionTemplateDecl *FTD) {
+ USRSet.insert(getUSRForDecl(FTD));
+ USRSet.insert(getUSRForDecl(FTD->getTemplatedDecl()));
+ for (const auto *S : FTD->specializations())
+ USRSet.insert(getUSRForDecl(S));
+ }
+
+ void handleVarTemplateDecl(const VarTemplateDecl *VTD) {
+ USRSet.insert(getUSRForDecl(VTD));
+ USRSet.insert(getUSRForDecl(VTD->getTemplatedDecl()));
+ llvm::for_each(VTD->specializations(), [&](const auto *Spec) {
+ USRSet.insert(getUSRForDecl(Spec));
+ });
+ SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ VTD->getPartialSpecializations(PartialSpecs);
+ llvm::for_each(PartialSpecs, [&](const auto *Spec) {
+ USRSet.insert(getUSRForDecl(Spec));
+ });
+ }
+
void addUSRsOfCtorDtors(const CXXRecordDecl *RD) {
const auto* RecordDecl = RD->getDefinition();
@@ -184,7 +213,6 @@ private:
std::set<std::string> USRSet;
std::vector<const CXXMethodDecl *> OverriddenMethods;
std::vector<const CXXMethodDecl *> InstantiatedMethods;
- std::vector<const ClassTemplatePartialSpecializationDecl *> PartialSpecs;
};
} // namespace
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index dfc319dd0639..6a08c7fd5247 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -21,7 +21,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
-#include "clang/Tooling/Core/Lookup.h"
+#include "clang/Tooling/Refactoring/Lookup.h"
#include "clang/Tooling/Refactoring/RecursiveSymbolVisitor.h"
#include "clang/Tooling/Refactoring/Rename/SymbolName.h"
#include "clang/Tooling/Refactoring/Rename/USRFinder.h"
diff --git a/clang/lib/Tooling/Syntax/BuildTree.cpp b/clang/lib/Tooling/Syntax/BuildTree.cpp
index 1f192180ec45..7654e3dfaa01 100644
--- a/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -13,6 +13,8 @@
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/IgnoreExpr.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TypeLoc.h"
@@ -44,8 +46,44 @@
using namespace clang;
+// Ignores the implicit `CXXConstructExpr` for copy/move constructor calls
+// generated by the compiler, as well as in implicit conversions like the one
+// wrapping `1` in `X x = 1;`.
+static Expr *IgnoreImplicitConstructorSingleStep(Expr *E) {
+ if (auto *C = dyn_cast<CXXConstructExpr>(E)) {
+ auto NumArgs = C->getNumArgs();
+ if (NumArgs == 1 || (NumArgs > 1 && isa<CXXDefaultArgExpr>(C->getArg(1)))) {
+ Expr *A = C->getArg(0);
+ if (C->getParenOrBraceRange().isInvalid())
+ return A;
+ }
+ }
+ return E;
+}
+
+// In:
+// struct X {
+// X(int)
+// };
+// X x = X(1);
+// Ignores the implicit `CXXFunctionalCastExpr` that wraps
+// `CXXConstructExpr X(1)`.
+static Expr *IgnoreCXXFunctionalCastExprWrappingConstructor(Expr *E) {
+ if (auto *F = dyn_cast<CXXFunctionalCastExpr>(E)) {
+ if (F->getCastKind() == CK_ConstructorConversion)
+ return F->getSubExpr();
+ }
+ return E;
+}
+
+static Expr *IgnoreImplicit(Expr *E) {
+ return IgnoreExprNodes(E, IgnoreImplicitSingleStep,
+ IgnoreImplicitConstructorSingleStep,
+ IgnoreCXXFunctionalCastExprWrappingConstructor);
+}
+
LLVM_ATTRIBUTE_UNUSED
-static bool isImplicitExpr(clang::Expr *E) { return E->IgnoreImplicit() != E; }
+static bool isImplicitExpr(Expr *E) { return IgnoreImplicit(E) != E; }
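
The two single-step helpers above exist because the syntax tree wants the tokens the user wrote, not the invisible nodes the AST inserts around them. The source patterns they target, as plain C++ that compiles:

struct X {
  X(int) {}
};

// Here `1` is wrapped in an implicit CXXConstructExpr with no written
// parens or braces; the builder should descend straight to the literal.
X a = 1;

// Here X(1) is a CXXConstructExpr wrapped in a CXXFunctionalCastExpr of
// kind CK_ConstructorConversion; the cast wrapper adds no tokens of its
// own, so it is skipped as well.
X b = X(1);

int main() {}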
namespace {
/// Get start location of the Declarator from the TypeLoc.
@@ -116,6 +154,13 @@ private:
};
} // namespace
+static CallExpr::arg_range dropDefaultArgs(CallExpr::arg_range Args) {
+ auto FirstDefaultArg = std::find_if(Args.begin(), Args.end(), [](auto It) {
+ return isa<CXXDefaultArgExpr>(It);
+ });
+ return llvm::make_range(Args.begin(), FirstDefaultArg);
+}
+
static syntax::NodeKind getOperatorNodeKind(const CXXOperatorCallExpr &E) {
switch (E.getOperator()) {
// Comparison
@@ -184,10 +229,11 @@ static syntax::NodeKind getOperatorNodeKind(const CXXOperatorCallExpr &E) {
case OO_Array_New:
case OO_Array_Delete:
case OO_Coawait:
- case OO_Call:
case OO_Subscript:
case OO_Arrow:
return syntax::NodeKind::UnknownExpression;
+ case OO_Call:
+ return syntax::NodeKind::CallExpression;
case OO_Conditional: // not overloadable
case NUM_OVERLOADED_OPERATORS:
case OO_None:
@@ -196,18 +242,58 @@ static syntax::NodeKind getOperatorNodeKind(const CXXOperatorCallExpr &E) {
llvm_unreachable("Unknown OverloadedOperatorKind enum");
}
+/// Get the start of the qualified name. In the examples below it gives the
+/// location of the `^`:
+/// `int ^a;`
+/// `int *^a;`
+/// `int ^a::S::f(){}`
+static SourceLocation getQualifiedNameStart(NamedDecl *D) {
+ assert((isa<DeclaratorDecl, TypedefNameDecl>(D)) &&
+ "only DeclaratorDecl and TypedefNameDecl are supported.");
+
+ auto DN = D->getDeclName();
+ bool IsAnonymous = DN.isIdentifier() && !DN.getAsIdentifierInfo();
+ if (IsAnonymous)
+ return SourceLocation();
+
+ if (const auto *DD = dyn_cast<DeclaratorDecl>(D)) {
+ if (DD->getQualifierLoc()) {
+ return DD->getQualifierLoc().getBeginLoc();
+ }
+ }
+
+ return D->getLocation();
+}
+
+/// Gets the range of the initializer inside an init-declarator C++ [dcl.decl].
+/// `int a;` -> range of ``,
+/// `int *a = nullptr` -> range of `= nullptr`.
+/// `int a{}` -> range of `{}`.
+/// `int a()` -> range of `()`.
+static SourceRange getInitializerRange(Decl *D) {
+ if (auto *V = dyn_cast<VarDecl>(D)) {
+ auto *I = V->getInit();
+ // Initializers in range-based-for are not part of the declarator
+ if (I && !V->isCXXForRangeDecl())
+ return I->getSourceRange();
+ }
+
+ return SourceRange();
+}
+
/// Gets the range of declarator as defined by the C++ grammar. E.g.
/// `int a;` -> range of `a`,
/// `int *a;` -> range of `*a`,
/// `int a[10];` -> range of `a[10]`,
/// `int a[1][2][3];` -> range of `a[1][2][3]`,
/// `int *a = nullptr` -> range of `*a = nullptr`.
-/// FIMXE: \p Name must be a source range, e.g. for `operator+`.
+/// `int S::f(){}` -> range of `S::f()`.
+/// FIXME: \p Name must be a source range.
static SourceRange getDeclaratorRange(const SourceManager &SM, TypeLoc T,
SourceLocation Name,
SourceRange Initializer) {
SourceLocation Start = GetStartLoc().Visit(T);
- SourceLocation End = T.getSourceRange().getEnd();
+ SourceLocation End = T.getEndLoc();
assert(End.isValid());
if (Name.isValid()) {
if (Start.isInvalid())
@@ -241,10 +327,24 @@ public:
assert(Added && "mapping added twice");
}
+ void add(NestedNameSpecifierLoc From, syntax::Tree *To) {
+ assert(To != nullptr);
+ assert(From.hasQualifier());
+
+ bool Added = NNSNodes.insert({From, To}).second;
+ (void)Added;
+ assert(Added && "mapping added twice");
+ }
+
syntax::Tree *find(ASTPtr P) const { return Nodes.lookup(P); }
+ syntax::Tree *find(NestedNameSpecifierLoc P) const {
+ return NNSNodes.lookup(P);
+ }
+
private:
llvm::DenseMap<ASTPtr, syntax::Tree *> Nodes;
+ llvm::DenseMap<NestedNameSpecifierLoc, syntax::Tree *> NNSNodes;
};
} // namespace
@@ -266,28 +366,48 @@ private:
class syntax::TreeBuilder {
public:
TreeBuilder(syntax::Arena &Arena) : Arena(Arena), Pending(Arena) {
- for (const auto &T : Arena.tokenBuffer().expandedTokens())
- LocationToToken.insert({T.location().getRawEncoding(), &T});
+ for (const auto &T : Arena.getTokenBuffer().expandedTokens())
+ LocationToToken.insert({T.location(), &T});
}
- llvm::BumpPtrAllocator &allocator() { return Arena.allocator(); }
- const SourceManager &sourceManager() const { return Arena.sourceManager(); }
+ llvm::BumpPtrAllocator &allocator() { return Arena.getAllocator(); }
+ const SourceManager &sourceManager() const {
+ return Arena.getSourceManager();
+ }
/// Populate children for \p New node, assuming it covers tokens from \p
/// Range.
- void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
- ASTPtr From) {
+ void foldNode(ArrayRef<syntax::Token> Range, syntax::Tree *New, ASTPtr From) {
assert(New);
Pending.foldChildren(Arena, Range, New);
if (From)
Mapping.add(From, New);
}
- void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
- TypeLoc L) {
+
+ void foldNode(ArrayRef<syntax::Token> Range, syntax::Tree *New, TypeLoc L) {
// FIXME: add mapping for TypeLocs
foldNode(Range, New, nullptr);
}
+ void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
+ NestedNameSpecifierLoc From) {
+ assert(New);
+ Pending.foldChildren(Arena, Range, New);
+ if (From)
+ Mapping.add(From, New);
+ }
+
+ /// Populate children for \p New list, assuming it covers tokens from a
+ /// subrange of \p SuperRange.
+ void foldList(ArrayRef<syntax::Token> SuperRange, syntax::List *New,
+ ASTPtr From) {
+ assert(New);
+ auto ListRange = Pending.shrinkToFitList(SuperRange);
+ Pending.foldChildren(Arena, ListRange, New);
+ if (From)
+ Mapping.add(From, New);
+ }
+
/// Notifies that we should not consume trailing semicolon when computing
/// token range of \p D.
void noticeDeclWithoutSemicolon(Decl *D);
@@ -309,16 +429,18 @@ public:
void markChild(syntax::Node *N, NodeRole R);
/// Set role for the syntax node matching \p N.
void markChild(ASTPtr N, NodeRole R);
+ /// Set role for the syntax node matching \p N.
+ void markChild(NestedNameSpecifierLoc N, NodeRole R);
/// Finish building the tree and consume the root node.
syntax::TranslationUnit *finalize() && {
- auto Tokens = Arena.tokenBuffer().expandedTokens();
+ auto Tokens = Arena.getTokenBuffer().expandedTokens();
assert(!Tokens.empty());
assert(Tokens.back().kind() == tok::eof);
// Build the root of the tree, consuming all the children.
Pending.foldChildren(Arena, Tokens.drop_back(),
- new (Arena.allocator()) syntax::TranslationUnit);
+ new (Arena.getAllocator()) syntax::TranslationUnit);
auto *TU = cast<syntax::TranslationUnit>(std::move(Pending).finalize());
TU->assertInvariantsRecursive();
@@ -329,7 +451,7 @@ public:
const syntax::Token *findToken(SourceLocation L) const;
/// Finds the syntax tokens corresponding to the \p SourceRange.
- llvm::ArrayRef<syntax::Token> getRange(SourceRange Range) const {
+ ArrayRef<syntax::Token> getRange(SourceRange Range) const {
assert(Range.isValid());
return getRange(Range.getBegin(), Range.getEnd());
}
@@ -337,16 +459,16 @@ public:
/// Finds the syntax tokens corresponding to the passed source locations.
/// \p First is the start position of the first token and \p Last is the start
/// position of the last token.
- llvm::ArrayRef<syntax::Token> getRange(SourceLocation First,
- SourceLocation Last) const {
+ ArrayRef<syntax::Token> getRange(SourceLocation First,
+ SourceLocation Last) const {
assert(First.isValid());
assert(Last.isValid());
assert(First == Last ||
- Arena.sourceManager().isBeforeInTranslationUnit(First, Last));
+ Arena.getSourceManager().isBeforeInTranslationUnit(First, Last));
return llvm::makeArrayRef(findToken(First), std::next(findToken(Last)));
}
- llvm::ArrayRef<syntax::Token>
+ ArrayRef<syntax::Token>
getTemplateRange(const ClassTemplateSpecializationDecl *D) const {
auto Tokens = getRange(D->getSourceRange());
return maybeAppendSemicolon(Tokens, D);
@@ -354,11 +476,9 @@ public:
/// Returns true if \p D is the last declarator in a chain and is thus
  /// responsible for creating SimpleDeclaration for the whole chain.
- template <class T>
- bool isResponsibleForCreatingDeclaration(const T *D) const {
- static_assert((std::is_base_of<DeclaratorDecl, T>::value ||
- std::is_base_of<TypedefNameDecl, T>::value),
- "only DeclaratorDecl and TypedefNameDecl are supported.");
+ bool isResponsibleForCreatingDeclaration(const Decl *D) const {
+ assert((isa<DeclaratorDecl, TypedefNameDecl>(D)) &&
+ "only DeclaratorDecl and TypedefNameDecl are supported.");
const Decl *Next = D->getNextDeclInContext();
@@ -366,15 +486,14 @@ public:
if (Next == nullptr) {
return true;
}
- const auto *NextT = llvm::dyn_cast<T>(Next);
// Next sibling is not the same type, this one is responsible.
- if (NextT == nullptr) {
+ if (D->getKind() != Next->getKind()) {
return true;
}
// Next sibling doesn't begin at the same loc, it must be a different
// declaration, so this declarator is responsible.
- if (NextT->getBeginLoc() != D->getBeginLoc()) {
+ if (Next->getBeginLoc() != D->getBeginLoc()) {
return true;
}
@@ -383,23 +502,23 @@ public:
return false;
}
- llvm::ArrayRef<syntax::Token> getDeclarationRange(Decl *D) {
- llvm::ArrayRef<clang::syntax::Token> Tokens;
+ ArrayRef<syntax::Token> getDeclarationRange(Decl *D) {
+ ArrayRef<syntax::Token> Tokens;
// We want to drop the template parameters for specializations.
- if (const auto *S = llvm::dyn_cast<TagDecl>(D))
+ if (const auto *S = dyn_cast<TagDecl>(D))
Tokens = getRange(S->TypeDecl::getBeginLoc(), S->getEndLoc());
else
Tokens = getRange(D->getSourceRange());
return maybeAppendSemicolon(Tokens, D);
}
- llvm::ArrayRef<syntax::Token> getExprRange(const Expr *E) const {
+ ArrayRef<syntax::Token> getExprRange(const Expr *E) const {
return getRange(E->getSourceRange());
}
/// Find the adjusted range for the statement, consuming the trailing
/// semicolon when needed.
- llvm::ArrayRef<syntax::Token> getStmtRange(const Stmt *S) const {
+ ArrayRef<syntax::Token> getStmtRange(const Stmt *S) const {
auto Tokens = getRange(S->getSourceRange());
if (isa<CompoundStmt>(S))
return Tokens;
@@ -412,10 +531,9 @@ public:
}
private:
- llvm::ArrayRef<syntax::Token>
- maybeAppendSemicolon(llvm::ArrayRef<syntax::Token> Tokens,
- const Decl *D) const {
- if (llvm::isa<NamespaceDecl>(D))
+ ArrayRef<syntax::Token> maybeAppendSemicolon(ArrayRef<syntax::Token> Tokens,
+ const Decl *D) const {
+ if (isa<NamespaceDecl>(D))
return Tokens;
if (DeclsWithoutSemicolons.count(D))
return Tokens;
@@ -424,8 +542,8 @@ private:
return withTrailingSemicolon(Tokens);
}
- llvm::ArrayRef<syntax::Token>
- withTrailingSemicolon(llvm::ArrayRef<syntax::Token> Tokens) const {
+ ArrayRef<syntax::Token>
+ withTrailingSemicolon(ArrayRef<syntax::Token> Tokens) const {
assert(!Tokens.empty());
assert(Tokens.back().kind() != tok::eof);
// We never consume 'eof', so looking at the next token is ok.
@@ -435,7 +553,7 @@ private:
}
void setRole(syntax::Node *N, NodeRole R) {
- assert(N->role() == NodeRole::Detached);
+ assert(N->getRole() == NodeRole::Detached);
N->setRole(R);
}
@@ -447,20 +565,19 @@ private:
/// Ensures that added nodes properly nest and cover the whole token stream.
struct Forest {
Forest(syntax::Arena &A) {
- assert(!A.tokenBuffer().expandedTokens().empty());
- assert(A.tokenBuffer().expandedTokens().back().kind() == tok::eof);
+ assert(!A.getTokenBuffer().expandedTokens().empty());
+ assert(A.getTokenBuffer().expandedTokens().back().kind() == tok::eof);
// Create all leaf nodes.
// Note that we do not have 'eof' in the tree.
- for (auto &T : A.tokenBuffer().expandedTokens().drop_back()) {
- auto *L = new (A.allocator()) syntax::Leaf(&T);
+ for (const auto &T : A.getTokenBuffer().expandedTokens().drop_back()) {
+ auto *L = new (A.getAllocator()) syntax::Leaf(&T);
L->Original = true;
- L->CanModify = A.tokenBuffer().spelledForExpanded(T).hasValue();
+ L->CanModify = A.getTokenBuffer().spelledForExpanded(T).hasValue();
Trees.insert(Trees.end(), {&T, L});
}
}
- void assignRole(llvm::ArrayRef<syntax::Token> Range,
- syntax::NodeRole Role) {
+ void assignRole(ArrayRef<syntax::Token> Range, syntax::NodeRole Role) {
assert(!Range.empty());
auto It = Trees.lower_bound(Range.begin());
assert(It != Trees.end() && "no node found");
@@ -468,17 +585,45 @@ private:
assert((std::next(It) == Trees.end() ||
std::next(It)->first == Range.end()) &&
"no child with the specified range");
- assert(It->second->role() == NodeRole::Detached &&
+ assert(It->second->getRole() == NodeRole::Detached &&
"re-assigning role for a child");
It->second->setRole(Role);
}
+ /// Shrink \p Range to a subrange that only contains tokens of a list.
+ /// List elements and delimiters should already have correct roles.
+ ArrayRef<syntax::Token> shrinkToFitList(ArrayRef<syntax::Token> Range) {
+ auto BeginChildren = Trees.lower_bound(Range.begin());
+ assert((BeginChildren == Trees.end() ||
+ BeginChildren->first == Range.begin()) &&
+ "Range crosses boundaries of existing subtrees");
+
+ auto EndChildren = Trees.lower_bound(Range.end());
+ assert(
+ (EndChildren == Trees.end() || EndChildren->first == Range.end()) &&
+ "Range crosses boundaries of existing subtrees");
+
+ auto BelongsToList = [](decltype(Trees)::value_type KV) {
+ auto Role = KV.second->getRole();
+ return Role == syntax::NodeRole::ListElement ||
+ Role == syntax::NodeRole::ListDelimiter;
+ };
+
+ auto BeginListChildren =
+ std::find_if(BeginChildren, EndChildren, BelongsToList);
+
+ auto EndListChildren =
+ std::find_if_not(BeginListChildren, EndChildren, BelongsToList);
+
+ return ArrayRef<syntax::Token>(BeginListChildren->first,
+ EndListChildren->first);
+ }
+
/// Add \p Node to the forest and attach child nodes based on \p Tokens.
- void foldChildren(const syntax::Arena &A,
- llvm::ArrayRef<syntax::Token> Tokens,
+ void foldChildren(const syntax::Arena &A, ArrayRef<syntax::Token> Tokens,
syntax::Tree *Node) {
// Attach children to `Node`.
- assert(Node->firstChild() == nullptr && "node already has children");
+ assert(Node->getFirstChild() == nullptr && "node already has children");
auto *FirstToken = Tokens.begin();
auto BeginChildren = Trees.lower_bound(FirstToken);
@@ -491,17 +636,17 @@ private:
(EndChildren == Trees.end() || EndChildren->first == Tokens.end()) &&
"fold crosses boundaries of existing subtrees");
- // We need to go in reverse order, because we can only prepend.
- for (auto It = EndChildren; It != BeginChildren; --It) {
- auto *C = std::prev(It)->second;
- if (C->role() == NodeRole::Detached)
+ for (auto It = BeginChildren; It != EndChildren; ++It) {
+ auto *C = It->second;
+ if (C->getRole() == NodeRole::Detached)
C->setRole(NodeRole::Unknown);
- Node->prependChildLowLevel(C);
+ Node->appendChildLowLevel(C);
}
// Mark that this node came from the AST and is backed by the source code.
Node->Original = true;
- Node->CanModify = A.tokenBuffer().spelledForExpanded(Tokens).hasValue();
+ Node->CanModify =
+ A.getTokenBuffer().spelledForExpanded(Tokens).hasValue();
Trees.erase(BeginChildren, EndChildren);
Trees.insert({FirstToken, Node});
@@ -521,12 +666,12 @@ private:
unsigned CoveredTokens =
It != Trees.end()
? (std::next(It)->first - It->first)
- : A.tokenBuffer().expandedTokens().end() - It->first;
+ : A.getTokenBuffer().expandedTokens().end() - It->first;
- R += std::string(llvm::formatv(
- "- '{0}' covers '{1}'+{2} tokens\n", It->second->kind(),
- It->first->text(A.sourceManager()), CoveredTokens));
- R += It->second->dump(A);
+ R += std::string(
+ formatv("- '{0}' covers '{1}'+{2} tokens\n", It->second->getKind(),
+ It->first->text(A.getSourceManager()), CoveredTokens));
+ R += It->second->dump(A.getSourceManager());
}
return R;
}
@@ -543,8 +688,7 @@ private:
syntax::Arena &Arena;
/// To quickly find tokens by their start location.
- llvm::DenseMap</*SourceLocation*/ unsigned, const syntax::Token *>
- LocationToToken;
+ llvm::DenseMap<SourceLocation, const syntax::Token *> LocationToToken;
Forest Pending;
llvm::DenseSet<Decl *> DeclsWithoutSemicolons;
ASTToSyntaxMapping Mapping;
@@ -623,7 +767,7 @@ public:
foldTemplateDeclaration(R, TemplateKW, DeclarationRange, nullptr);
DeclarationRange = R;
};
- if (auto *S = llvm::dyn_cast<ClassTemplatePartialSpecializationDecl>(C))
+ if (auto *S = dyn_cast<ClassTemplatePartialSpecializationDecl>(C))
ConsumeTemplateParameters(*S->getTemplateParameters());
for (unsigned I = C->getNumTemplateParameterLists(); 0 < I; --I)
ConsumeTemplateParameters(*C->getTemplateParameterList(I - 1));
@@ -641,7 +785,7 @@ public:
Builder.markChildToken(S->getLBracLoc(), NodeRole::OpenParen);
for (auto *Child : S->body())
- Builder.markStmtChild(Child, NodeRole::CompoundStatement_statement);
+ Builder.markStmtChild(Child, NodeRole::Statement);
Builder.markChildToken(S->getRBracLoc(), NodeRole::CloseParen);
Builder.foldNode(Builder.getStmtRange(S),
@@ -677,12 +821,12 @@ public:
}
bool TraverseStmt(Stmt *S) {
- if (auto *DS = llvm::dyn_cast_or_null<DeclStmt>(S)) {
+ if (auto *DS = dyn_cast_or_null<DeclStmt>(S)) {
      // We want to consume the semicolon; make sure SimpleDeclaration does not.
for (auto *D : DS->decls())
Builder.noticeDeclWithoutSemicolon(D);
- } else if (auto *E = llvm::dyn_cast_or_null<Expr>(S)) {
- return RecursiveASTVisitor::TraverseStmt(E->IgnoreImplicit());
+ } else if (auto *E = dyn_cast_or_null<Expr>(S)) {
+ return RecursiveASTVisitor::TraverseStmt(IgnoreImplicit(E));
}
return RecursiveASTVisitor::TraverseStmt(S);
}
@@ -695,21 +839,6 @@ public:
return true;
}
- syntax::NestedNameSpecifier *
- BuildNestedNameSpecifier(NestedNameSpecifierLoc QualifierLoc) {
- if (!QualifierLoc)
- return nullptr;
- for (auto it = QualifierLoc; it; it = it.getPrefix()) {
- auto *NS = new (allocator()) syntax::NameSpecifier;
- Builder.foldNode(Builder.getRange(it.getLocalSourceRange()), NS, nullptr);
- Builder.markChild(NS, syntax::NodeRole::NestedNameSpecifier_specifier);
- }
- auto *NNS = new (allocator()) syntax::NestedNameSpecifier;
- Builder.foldNode(Builder.getRange(QualifierLoc.getSourceRange()), NNS,
- nullptr);
- return NNS;
- }
-
bool TraverseUserDefinedLiteral(UserDefinedLiteral *S) {
// The semantic AST node `UserDefinedLiteral` (UDL) may have one child node
// referencing the location of the UDL suffix (`_w` in `1.2_w`). The
@@ -722,16 +851,16 @@ public:
syntax::UserDefinedLiteralExpression *
buildUserDefinedLiteral(UserDefinedLiteral *S) {
switch (S->getLiteralOperatorKind()) {
- case clang::UserDefinedLiteral::LOK_Integer:
+ case UserDefinedLiteral::LOK_Integer:
return new (allocator()) syntax::IntegerUserDefinedLiteralExpression;
- case clang::UserDefinedLiteral::LOK_Floating:
+ case UserDefinedLiteral::LOK_Floating:
return new (allocator()) syntax::FloatUserDefinedLiteralExpression;
- case clang::UserDefinedLiteral::LOK_Character:
+ case UserDefinedLiteral::LOK_Character:
return new (allocator()) syntax::CharUserDefinedLiteralExpression;
- case clang::UserDefinedLiteral::LOK_String:
+ case UserDefinedLiteral::LOK_String:
return new (allocator()) syntax::StringUserDefinedLiteralExpression;
- case clang::UserDefinedLiteral::LOK_Raw:
- case clang::UserDefinedLiteral::LOK_Template:
+ case UserDefinedLiteral::LOK_Raw:
+ case UserDefinedLiteral::LOK_Template:
      // For the raw literal operator and the numeric literal operator template
      // we cannot get the type of the operand in the semantic AST; we get this
      // information from the token. As integer and floating point have the same
@@ -759,34 +888,202 @@ public:
return true;
}
- bool WalkUpFromDeclRefExpr(DeclRefExpr *S) {
- if (auto *NNS = BuildNestedNameSpecifier(S->getQualifierLoc()))
- Builder.markChild(NNS, syntax::NodeRole::IdExpression_qualifier);
-
- auto *unqualifiedId = new (allocator()) syntax::UnqualifiedId;
- // Get `UnqualifiedId` from `DeclRefExpr`.
- // FIXME: Extract this logic so that it can be used by `MemberExpr`,
- // and other semantic constructs, now it is tied to `DeclRefExpr`.
- if (!S->hasExplicitTemplateArgs()) {
- Builder.foldNode(Builder.getRange(S->getNameInfo().getSourceRange()),
- unqualifiedId, nullptr);
- } else {
- auto templateIdSourceRange =
- SourceRange(S->getNameInfo().getBeginLoc(), S->getRAngleLoc());
- Builder.foldNode(Builder.getRange(templateIdSourceRange), unqualifiedId,
- nullptr);
+ // FIXME: Fix `NestedNameSpecifierLoc::getLocalSourceRange` for the
+ // `DependentTemplateSpecializationType` case.
+ /// Given a nested-name-specifier return the range for the last name
+ /// specifier.
+ ///
+ /// e.g. `std::T::template X<U>::` => `template X<U>::`
+ SourceRange getLocalSourceRange(const NestedNameSpecifierLoc &NNSLoc) {
+ auto SR = NNSLoc.getLocalSourceRange();
+
+    // The method `NestedNameSpecifierLoc::getLocalSourceRange` *should*
+    // return the desired `SourceRange`, but there is a corner case: for a
+    // `DependentTemplateSpecializationType` this method returns its
+    // qualifiers as well. In other words, in the example above it returns
+    // `T::template X<U>::` instead of only `template X<U>::`.
+ if (auto TL = NNSLoc.getTypeLoc()) {
+ if (auto DependentTL =
+ TL.getAs<DependentTemplateSpecializationTypeLoc>()) {
+        // The 'template' keyword is always present in dependent template
+        // specializations, except in the case of incorrect code.
+        // TODO: Handle the case of incorrect code.
+ SR.setBegin(DependentTL.getTemplateKeywordLoc());
+ }
+ }
+
+ return SR;
+ }
+
+ syntax::NodeKind getNameSpecifierKind(const NestedNameSpecifier &NNS) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Global:
+ return syntax::NodeKind::GlobalNameSpecifier;
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Identifier:
+ return syntax::NodeKind::IdentifierNameSpecifier;
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ return syntax::NodeKind::SimpleTemplateNameSpecifier;
+ case NestedNameSpecifier::TypeSpec: {
+ const auto *NNSType = NNS.getAsType();
+ assert(NNSType);
+ if (isa<DecltypeType>(NNSType))
+ return syntax::NodeKind::DecltypeNameSpecifier;
+ if (isa<TemplateSpecializationType, DependentTemplateSpecializationType>(
+ NNSType))
+ return syntax::NodeKind::SimpleTemplateNameSpecifier;
+ return syntax::NodeKind::IdentifierNameSpecifier;
+ }
+ default:
+ // FIXME: Support Microsoft's __super
+ llvm::report_fatal_error("We don't yet support the __super specifier",
+ true);
+ }
+ }
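+
+  // Illustrative mapping (a sketch, read off the switch above, not exercised
+  // by this change): for a qualifier as written in the source code,
+  // `getNameSpecifierKind` yields
+  //   `::`            -> GlobalNameSpecifier
+  //   `std::`         -> IdentifierNameSpecifier
+  //   `vector<int>::` -> SimpleTemplateNameSpecifier
+  //   `decltype(x)::` -> DecltypeNameSpecifier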
+
+ syntax::NameSpecifier *
+ buildNameSpecifier(const NestedNameSpecifierLoc &NNSLoc) {
+ assert(NNSLoc.hasQualifier());
+ auto NameSpecifierTokens =
+ Builder.getRange(getLocalSourceRange(NNSLoc)).drop_back();
+ switch (getNameSpecifierKind(*NNSLoc.getNestedNameSpecifier())) {
+ case syntax::NodeKind::GlobalNameSpecifier:
+ return new (allocator()) syntax::GlobalNameSpecifier;
+ case syntax::NodeKind::IdentifierNameSpecifier: {
+ assert(NameSpecifierTokens.size() == 1);
+ Builder.markChildToken(NameSpecifierTokens.begin(),
+ syntax::NodeRole::Unknown);
+ auto *NS = new (allocator()) syntax::IdentifierNameSpecifier;
+ Builder.foldNode(NameSpecifierTokens, NS, nullptr);
+ return NS;
+ }
+ case syntax::NodeKind::SimpleTemplateNameSpecifier: {
+ // TODO: Build `SimpleTemplateNameSpecifier` children and implement
+ // accessors to them.
+      // Be aware that we cannot do this simply by calling `TraverseTypeLoc`:
+      // some `TypeLoc`s contain the previous name specifier inside them, and
+      // we want to treat the specifiers independently.
+ auto *NS = new (allocator()) syntax::SimpleTemplateNameSpecifier;
+ Builder.foldNode(NameSpecifierTokens, NS, nullptr);
+ return NS;
+ }
+ case syntax::NodeKind::DecltypeNameSpecifier: {
+ const auto TL = NNSLoc.getTypeLoc().castAs<DecltypeTypeLoc>();
+ if (!RecursiveASTVisitor::TraverseDecltypeTypeLoc(TL))
+ return nullptr;
+ auto *NS = new (allocator()) syntax::DecltypeNameSpecifier;
+ // TODO: Implement accessor to `DecltypeNameSpecifier` inner
+ // `DecltypeTypeLoc`.
+ // For that add mapping from `TypeLoc` to `syntax::Node*` then:
+ // Builder.markChild(TypeLoc, syntax::NodeRole);
+ Builder.foldNode(NameSpecifierTokens, NS, nullptr);
+ return NS;
+ }
+ default:
+ llvm_unreachable("getChildKind() does not return this value");
+ }
+ }
+
+ // To build syntax tree nodes for NestedNameSpecifierLoc we override
+ // Traverse instead of WalkUpFrom because we want to traverse the children
+ // ourselves and build a list instead of a nested tree of name specifier
+ // prefixes.
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc QualifierLoc) {
+ if (!QualifierLoc)
+ return true;
+ for (auto It = QualifierLoc; It; It = It.getPrefix()) {
+ auto *NS = buildNameSpecifier(It);
+ if (!NS)
+ return false;
+ Builder.markChild(NS, syntax::NodeRole::ListElement);
+ Builder.markChildToken(It.getEndLoc(), syntax::NodeRole::ListDelimiter);
+ }
+ Builder.foldNode(Builder.getRange(QualifierLoc.getSourceRange()),
+ new (allocator()) syntax::NestedNameSpecifier,
+ QualifierLoc);
+ return true;
+ }
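+
+  // For example, given `a::b::` in the source, the intended shape (sketch,
+  // illustrative only) is a flat list rather than a nested tree:
+  //   NestedNameSpecifier
+  //   |-IdentifierNameSpecifier ListElement
+  //   |-'::' ListDelimiter
+  //   |-IdentifierNameSpecifier ListElement
+  //   `-'::' ListDelimiter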
+
+ syntax::IdExpression *buildIdExpression(NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc,
+ SourceRange UnqualifiedIdLoc,
+ ASTPtr From) {
+ if (QualifierLoc) {
+ Builder.markChild(QualifierLoc, syntax::NodeRole::Qualifier);
+ if (TemplateKeywordLoc.isValid())
+ Builder.markChildToken(TemplateKeywordLoc,
+ syntax::NodeRole::TemplateKeyword);
}
- Builder.markChild(unqualifiedId, syntax::NodeRole::IdExpression_id);
+
+ auto *TheUnqualifiedId = new (allocator()) syntax::UnqualifiedId;
+ Builder.foldNode(Builder.getRange(UnqualifiedIdLoc), TheUnqualifiedId,
+ nullptr);
+ Builder.markChild(TheUnqualifiedId, syntax::NodeRole::UnqualifiedId);
+
+ auto IdExpressionBeginLoc =
+ QualifierLoc ? QualifierLoc.getBeginLoc() : UnqualifiedIdLoc.getBegin();
+
+ auto *TheIdExpression = new (allocator()) syntax::IdExpression;
+ Builder.foldNode(
+ Builder.getRange(IdExpressionBeginLoc, UnqualifiedIdLoc.getEnd()),
+ TheIdExpression, From);
+
+ return TheIdExpression;
+ }
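+
+  // Shape sketch (illustrative): for `ns::f` the resulting IdExpression has a
+  // NestedNameSpecifier child in role Qualifier covering `ns::` and an
+  // UnqualifiedId child covering `f`; for a plain `f` the Qualifier is absent.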
+
+ bool WalkUpFromMemberExpr(MemberExpr *S) {
+    // For a `MemberExpr` with an implicit `this->` we generate a simple
+    // `id-expression` syntax node, because an implicit `member-expression` is
+    // syntactically indistinguishable from an `id-expression`.
+ if (S->isImplicitAccess()) {
+ buildIdExpression(S->getQualifierLoc(), S->getTemplateKeywordLoc(),
+ SourceRange(S->getMemberLoc(), S->getEndLoc()), S);
+ return true;
+ }
+
+ auto *TheIdExpression = buildIdExpression(
+ S->getQualifierLoc(), S->getTemplateKeywordLoc(),
+ SourceRange(S->getMemberLoc(), S->getEndLoc()), nullptr);
+
+ Builder.markChild(TheIdExpression, syntax::NodeRole::Member);
+
+ Builder.markExprChild(S->getBase(), syntax::NodeRole::Object);
+ Builder.markChildToken(S->getOperatorLoc(), syntax::NodeRole::AccessToken);
Builder.foldNode(Builder.getExprRange(S),
- new (allocator()) syntax::IdExpression, S);
+ new (allocator()) syntax::MemberExpression, S);
+ return true;
+ }
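+
+  // Sketch of the two cases (illustrative): `this->x` produces a
+  // MemberExpression with Object `this`, AccessToken `->` and Member `x`,
+  // whereas a bare `x` referring to a member (implicit `this->`) produces
+  // only an id-expression, since nothing else is spelled in the source.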
+
+ bool WalkUpFromDeclRefExpr(DeclRefExpr *S) {
+ buildIdExpression(S->getQualifierLoc(), S->getTemplateKeywordLoc(),
+ SourceRange(S->getLocation(), S->getEndLoc()), S);
+
+ return true;
+ }
+
+ // Same logic as DeclRefExpr.
+ bool WalkUpFromDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *S) {
+ buildIdExpression(S->getQualifierLoc(), S->getTemplateKeywordLoc(),
+ SourceRange(S->getLocation(), S->getEndLoc()), S);
+
+ return true;
+ }
+
+ bool WalkUpFromCXXThisExpr(CXXThisExpr *S) {
+ if (!S->isImplicit()) {
+ Builder.markChildToken(S->getLocation(),
+ syntax::NodeRole::IntroducerKeyword);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::ThisExpression, S);
+ }
return true;
}
bool WalkUpFromParenExpr(ParenExpr *S) {
Builder.markChildToken(S->getLParen(), syntax::NodeRole::OpenParen);
- Builder.markExprChild(S->getSubExpr(),
- syntax::NodeRole::ParenExpression_subExpression);
+ Builder.markExprChild(S->getSubExpr(), syntax::NodeRole::SubExpression);
Builder.markChildToken(S->getRParen(), syntax::NodeRole::CloseParen);
Builder.foldNode(Builder.getExprRange(S),
new (allocator()) syntax::ParenExpression, S);
@@ -837,9 +1134,8 @@ public:
bool WalkUpFromUnaryOperator(UnaryOperator *S) {
Builder.markChildToken(S->getOperatorLoc(),
- syntax::NodeRole::OperatorExpression_operatorToken);
- Builder.markExprChild(S->getSubExpr(),
- syntax::NodeRole::UnaryOperatorExpression_operand);
+ syntax::NodeRole::OperatorToken);
+ Builder.markExprChild(S->getSubExpr(), syntax::NodeRole::Operand);
if (S->isPostfix())
Builder.foldNode(Builder.getExprRange(S),
@@ -854,79 +1150,143 @@ public:
}
bool WalkUpFromBinaryOperator(BinaryOperator *S) {
- Builder.markExprChild(
- S->getLHS(), syntax::NodeRole::BinaryOperatorExpression_leftHandSide);
+ Builder.markExprChild(S->getLHS(), syntax::NodeRole::LeftHandSide);
Builder.markChildToken(S->getOperatorLoc(),
- syntax::NodeRole::OperatorExpression_operatorToken);
- Builder.markExprChild(
- S->getRHS(), syntax::NodeRole::BinaryOperatorExpression_rightHandSide);
+ syntax::NodeRole::OperatorToken);
+ Builder.markExprChild(S->getRHS(), syntax::NodeRole::RightHandSide);
Builder.foldNode(Builder.getExprRange(S),
new (allocator()) syntax::BinaryOperatorExpression, S);
return true;
}
+  /// Builds the `CallArguments` syntax node from the arguments that appear in
+  /// the source code, i.e. excluding default arguments.
+ syntax::CallArguments *
+ buildCallArguments(CallExpr::arg_range ArgsAndDefaultArgs) {
+ auto Args = dropDefaultArgs(ArgsAndDefaultArgs);
+ for (auto *Arg : Args) {
+ Builder.markExprChild(Arg, syntax::NodeRole::ListElement);
+ const auto *DelimiterToken =
+ std::next(Builder.findToken(Arg->getEndLoc()));
+ if (DelimiterToken->kind() == clang::tok::TokenKind::comma)
+ Builder.markChildToken(DelimiterToken, syntax::NodeRole::ListDelimiter);
+ }
+
+ auto *Arguments = new (allocator()) syntax::CallArguments;
+ if (!Args.empty())
+ Builder.foldNode(Builder.getRange((*Args.begin())->getBeginLoc(),
+ (*(Args.end() - 1))->getEndLoc()),
+ Arguments, nullptr);
+
+ return Arguments;
+ }
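+
+  // Sketch (illustrative, assuming `f(1, 2)` in the source): the resulting
+  // list is
+  //   CallArguments
+  //   |-IntegerLiteralExpression ListElement
+  //   |-',' ListDelimiter
+  //   `-IntegerLiteralExpression ListElement
+  // For `void g(int a, int b = 1);` a call `g(2)` covers only `2`; the
+  // defaulted argument is dropped because it is not spelled in the source.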
+
+ bool WalkUpFromCallExpr(CallExpr *S) {
+ Builder.markExprChild(S->getCallee(), syntax::NodeRole::Callee);
+
+ const auto *LParenToken =
+ std::next(Builder.findToken(S->getCallee()->getEndLoc()));
+    // FIXME: Assert that `LParenToken` is indeed an `l_paren` once we have
+    // fixed the test on decltype destructors.
+ if (LParenToken->kind() == clang::tok::l_paren)
+ Builder.markChildToken(LParenToken, syntax::NodeRole::OpenParen);
+
+ Builder.markChild(buildCallArguments(S->arguments()),
+ syntax::NodeRole::Arguments);
+
+ Builder.markChildToken(S->getRParenLoc(), syntax::NodeRole::CloseParen);
+
+ Builder.foldNode(Builder.getRange(S->getSourceRange()),
+ new (allocator()) syntax::CallExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromCXXConstructExpr(CXXConstructExpr *S) {
+ // Ignore the implicit calls to default constructors.
+ if ((S->getNumArgs() == 0 || isa<CXXDefaultArgExpr>(S->getArg(0))) &&
+ S->getParenOrBraceRange().isInvalid())
+ return true;
+ return RecursiveASTVisitor::WalkUpFromCXXConstructExpr(S);
+ }
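+
+  // E.g. (sketch): for `struct X {}; X x;` the implicit default-constructor
+  // call has an invalid paren/brace range and produces no syntax node, while
+  // `X x{};` keeps it, since the braces are spelled in the source.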
+
bool TraverseCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
- if (getOperatorNodeKind(*S) ==
- syntax::NodeKind::PostfixUnaryOperatorExpression) {
+ // To construct a syntax tree of the same shape for calls to built-in and
+ // user-defined operators, ignore the `DeclRefExpr` that refers to the
+ // operator and treat it as a simple token. Do that by traversing
+ // arguments instead of children.
+ for (auto *child : S->arguments()) {
// A postfix unary operator is declared as taking two operands. The
// second operand is used to distinguish from its prefix counterpart. In
    // the semantic AST this "phantom" operand is represented as an
    // `IntegerLiteral` with an invalid `SourceLocation`. We skip visiting this
// operand because it does not correspond to anything written in source
- // code
- for (auto *child : S->children()) {
- if (child->getSourceRange().isInvalid())
- continue;
- if (!TraverseStmt(child))
- return false;
+ // code.
+ if (child->getSourceRange().isInvalid()) {
+ assert(getOperatorNodeKind(*S) ==
+ syntax::NodeKind::PostfixUnaryOperatorExpression);
+ continue;
}
- return WalkUpFromCXXOperatorCallExpr(S);
- } else
- return RecursiveASTVisitor::TraverseCXXOperatorCallExpr(S);
+ if (!TraverseStmt(child))
+ return false;
+ }
+ return WalkUpFromCXXOperatorCallExpr(S);
}
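+  // For instance (illustrative): `x++` with an overloaded operator is modeled
+  // as `operator++(x, 0)`, where the phantom `0` has an invalid
+  // SourceLocation and is skipped by the loop above.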
bool WalkUpFromCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
switch (getOperatorNodeKind(*S)) {
case syntax::NodeKind::BinaryOperatorExpression:
- Builder.markExprChild(
- S->getArg(0),
- syntax::NodeRole::BinaryOperatorExpression_leftHandSide);
- Builder.markChildToken(
- S->getOperatorLoc(),
- syntax::NodeRole::OperatorExpression_operatorToken);
- Builder.markExprChild(
- S->getArg(1),
- syntax::NodeRole::BinaryOperatorExpression_rightHandSide);
+ Builder.markExprChild(S->getArg(0), syntax::NodeRole::LeftHandSide);
+ Builder.markChildToken(S->getOperatorLoc(),
+ syntax::NodeRole::OperatorToken);
+ Builder.markExprChild(S->getArg(1), syntax::NodeRole::RightHandSide);
Builder.foldNode(Builder.getExprRange(S),
new (allocator()) syntax::BinaryOperatorExpression, S);
return true;
case syntax::NodeKind::PrefixUnaryOperatorExpression:
- Builder.markChildToken(
- S->getOperatorLoc(),
- syntax::NodeRole::OperatorExpression_operatorToken);
- Builder.markExprChild(S->getArg(0),
- syntax::NodeRole::UnaryOperatorExpression_operand);
+ Builder.markChildToken(S->getOperatorLoc(),
+ syntax::NodeRole::OperatorToken);
+ Builder.markExprChild(S->getArg(0), syntax::NodeRole::Operand);
Builder.foldNode(Builder.getExprRange(S),
new (allocator()) syntax::PrefixUnaryOperatorExpression,
S);
return true;
case syntax::NodeKind::PostfixUnaryOperatorExpression:
- Builder.markChildToken(
- S->getOperatorLoc(),
- syntax::NodeRole::OperatorExpression_operatorToken);
- Builder.markExprChild(S->getArg(0),
- syntax::NodeRole::UnaryOperatorExpression_operand);
+ Builder.markChildToken(S->getOperatorLoc(),
+ syntax::NodeRole::OperatorToken);
+ Builder.markExprChild(S->getArg(0), syntax::NodeRole::Operand);
Builder.foldNode(Builder.getExprRange(S),
new (allocator()) syntax::PostfixUnaryOperatorExpression,
S);
return true;
+ case syntax::NodeKind::CallExpression: {
+ Builder.markExprChild(S->getArg(0), syntax::NodeRole::Callee);
+
+ const auto *LParenToken =
+ std::next(Builder.findToken(S->getArg(0)->getEndLoc()));
+      // FIXME: Assert that `LParenToken` is indeed an `l_paren` once we have
+      // fixed the test on decltype destructors.
+ if (LParenToken->kind() == clang::tok::l_paren)
+ Builder.markChildToken(LParenToken, syntax::NodeRole::OpenParen);
+
+ Builder.markChild(buildCallArguments(CallExpr::arg_range(
+ S->arg_begin() + 1, S->arg_end())),
+ syntax::NodeRole::Arguments);
+
+ Builder.markChildToken(S->getRParenLoc(), syntax::NodeRole::CloseParen);
+
+ Builder.foldNode(Builder.getRange(S->getSourceRange()),
+ new (allocator()) syntax::CallExpression, S);
+ return true;
+ }
case syntax::NodeKind::UnknownExpression:
- return RecursiveASTVisitor::WalkUpFromCXXOperatorCallExpr(S);
+ return WalkUpFromExpr(S);
default:
llvm_unreachable("getOperatorNodeKind() does not return this value");
}
}
+ bool WalkUpFromCXXDefaultArgExpr(CXXDefaultArgExpr *S) { return true; }
+
bool WalkUpFromNamespaceDecl(NamespaceDecl *S) {
auto Tokens = Builder.getDeclarationRange(S);
if (Tokens.front().kind() == tok::coloncolon) {
@@ -939,6 +1299,8 @@ public:
return true;
}
+ // FIXME: Deleting the `TraverseParenTypeLoc` override doesn't change test
+ // results. Find test coverage or remove it.
bool TraverseParenTypeLoc(ParenTypeLoc L) {
    // We reverse the order of traversal to get the proper syntax structure.
if (!WalkUpFromParenTypeLoc(L))
@@ -957,19 +1319,35 @@ public:
// Declarator chunks, they are produced by type locs and some clang::Decls.
bool WalkUpFromArrayTypeLoc(ArrayTypeLoc L) {
Builder.markChildToken(L.getLBracketLoc(), syntax::NodeRole::OpenParen);
- Builder.markExprChild(L.getSizeExpr(),
- syntax::NodeRole::ArraySubscript_sizeExpression);
+ Builder.markExprChild(L.getSizeExpr(), syntax::NodeRole::Size);
Builder.markChildToken(L.getRBracketLoc(), syntax::NodeRole::CloseParen);
Builder.foldNode(Builder.getRange(L.getLBracketLoc(), L.getRBracketLoc()),
new (allocator()) syntax::ArraySubscript, L);
return true;
}
+ syntax::ParameterDeclarationList *
+ buildParameterDeclarationList(ArrayRef<ParmVarDecl *> Params) {
+ for (auto *P : Params) {
+ Builder.markChild(P, syntax::NodeRole::ListElement);
+ const auto *DelimiterToken = std::next(Builder.findToken(P->getEndLoc()));
+ if (DelimiterToken->kind() == clang::tok::TokenKind::comma)
+ Builder.markChildToken(DelimiterToken, syntax::NodeRole::ListDelimiter);
+ }
+ auto *Parameters = new (allocator()) syntax::ParameterDeclarationList;
+ if (!Params.empty())
+ Builder.foldNode(Builder.getRange(Params.front()->getBeginLoc(),
+ Params.back()->getEndLoc()),
+ Parameters, nullptr);
+ return Parameters;
+ }
+
bool WalkUpFromFunctionTypeLoc(FunctionTypeLoc L) {
Builder.markChildToken(L.getLParenLoc(), syntax::NodeRole::OpenParen);
- for (auto *P : L.getParams()) {
- Builder.markChild(P, syntax::NodeRole::ParametersAndQualifiers_parameter);
- }
+
+ Builder.markChild(buildParameterDeclarationList(L.getParams()),
+ syntax::NodeRole::Parameters);
+
Builder.markChildToken(L.getRParenLoc(), syntax::NodeRole::CloseParen);
Builder.foldNode(Builder.getRange(L.getLParenLoc(), L.getEndLoc()),
new (allocator()) syntax::ParametersAndQualifiers, L);
@@ -980,13 +1358,22 @@ public:
if (!L.getTypePtr()->hasTrailingReturn())
return WalkUpFromFunctionTypeLoc(L);
- auto *TrailingReturnTokens = BuildTrailingReturn(L);
+ auto *TrailingReturnTokens = buildTrailingReturn(L);
// Finish building the node for parameters.
- Builder.markChild(TrailingReturnTokens,
- syntax::NodeRole::ParametersAndQualifiers_trailingReturn);
+ Builder.markChild(TrailingReturnTokens, syntax::NodeRole::TrailingReturn);
return WalkUpFromFunctionTypeLoc(L);
}
+ bool TraverseMemberPointerTypeLoc(MemberPointerTypeLoc L) {
+    // In the source code "void (Y::*mp)()", `MemberPointerTypeLoc` corresponds
+    // to "Y::*" but it points to a `ParenTypeLoc` that corresponds to
+    // "(Y::*mp)". We thus reverse the order of traversal to get the proper
+ // syntax structure.
+ if (!WalkUpFromMemberPointerTypeLoc(L))
+ return false;
+ return TraverseTypeLoc(L.getPointeeLoc());
+ }
+
bool WalkUpFromMemberPointerTypeLoc(MemberPointerTypeLoc L) {
auto SR = L.getLocalSourceRange();
Builder.foldNode(Builder.getRange(SR),
@@ -1021,7 +1408,7 @@ public:
bool WalkUpFromCaseStmt(CaseStmt *S) {
Builder.markChildToken(S->getKeywordLoc(),
syntax::NodeRole::IntroducerKeyword);
- Builder.markExprChild(S->getLHS(), syntax::NodeRole::CaseStatement_value);
+ Builder.markExprChild(S->getLHS(), syntax::NodeRole::CaseValue);
Builder.markStmtChild(S->getSubStmt(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
new (allocator()) syntax::CaseStatement, S);
@@ -1039,12 +1426,9 @@ public:
bool WalkUpFromIfStmt(IfStmt *S) {
Builder.markChildToken(S->getIfLoc(), syntax::NodeRole::IntroducerKeyword);
- Builder.markStmtChild(S->getThen(),
- syntax::NodeRole::IfStatement_thenStatement);
- Builder.markChildToken(S->getElseLoc(),
- syntax::NodeRole::IfStatement_elseKeyword);
- Builder.markStmtChild(S->getElse(),
- syntax::NodeRole::IfStatement_elseStatement);
+ Builder.markStmtChild(S->getThen(), syntax::NodeRole::ThenStatement);
+ Builder.markChildToken(S->getElseLoc(), syntax::NodeRole::ElseKeyword);
+ Builder.markStmtChild(S->getElse(), syntax::NodeRole::ElseStatement);
Builder.foldNode(Builder.getStmtRange(S),
new (allocator()) syntax::IfStatement, S);
return true;
@@ -1086,8 +1470,7 @@ public:
bool WalkUpFromReturnStmt(ReturnStmt *S) {
Builder.markChildToken(S->getReturnLoc(),
syntax::NodeRole::IntroducerKeyword);
- Builder.markExprChild(S->getRetValue(),
- syntax::NodeRole::ReturnStatement_value);
+ Builder.markExprChild(S->getRetValue(), syntax::NodeRole::ReturnValue);
Builder.foldNode(Builder.getStmtRange(S),
new (allocator()) syntax::ReturnStatement, S);
return true;
@@ -1108,10 +1491,8 @@ public:
}
bool WalkUpFromStaticAssertDecl(StaticAssertDecl *S) {
- Builder.markExprChild(S->getAssertExpr(),
- syntax::NodeRole::StaticAssertDeclaration_condition);
- Builder.markExprChild(S->getMessage(),
- syntax::NodeRole::StaticAssertDeclaration_message);
+ Builder.markExprChild(S->getAssertExpr(), syntax::NodeRole::Condition);
+ Builder.markExprChild(S->getMessage(), syntax::NodeRole::Message);
Builder.foldNode(Builder.getDeclarationRange(S),
new (allocator()) syntax::StaticAssertDeclaration, S);
return true;
@@ -1161,69 +1542,53 @@ public:
}
private:
- template <class T> SourceLocation getQualifiedNameStart(T *D) {
- static_assert((std::is_base_of<DeclaratorDecl, T>::value ||
- std::is_base_of<TypedefNameDecl, T>::value),
- "only DeclaratorDecl and TypedefNameDecl are supported.");
-
- auto DN = D->getDeclName();
- bool IsAnonymous = DN.isIdentifier() && !DN.getAsIdentifierInfo();
- if (IsAnonymous)
- return SourceLocation();
-
- if (const auto *DD = llvm::dyn_cast<DeclaratorDecl>(D)) {
- if (DD->getQualifierLoc()) {
- return DD->getQualifierLoc().getBeginLoc();
- }
- }
-
- return D->getLocation();
- }
-
- SourceRange getInitializerRange(Decl *D) {
- if (auto *V = llvm::dyn_cast<VarDecl>(D)) {
- auto *I = V->getInit();
- // Initializers in range-based-for are not part of the declarator
- if (I && !V->isCXXForRangeDecl())
- return I->getSourceRange();
- }
-
- return SourceRange();
- }
-
/// Folds SimpleDeclarator node (if present) and in case this is the last
/// declarator in the chain it also folds SimpleDeclaration node.
template <class T> bool processDeclaratorAndDeclaration(T *D) {
- SourceRange Initializer = getInitializerRange(D);
- auto Range = getDeclaratorRange(Builder.sourceManager(),
- D->getTypeSourceInfo()->getTypeLoc(),
- getQualifiedNameStart(D), Initializer);
+ auto Range = getDeclaratorRange(
+ Builder.sourceManager(), D->getTypeSourceInfo()->getTypeLoc(),
+ getQualifiedNameStart(D), getInitializerRange(D));
    // There doesn't have to be a declarator (e.g. `void foo(int)` only has a
    // declaration but no declarator).
- if (Range.getBegin().isValid()) {
- auto *N = new (allocator()) syntax::SimpleDeclarator;
- Builder.foldNode(Builder.getRange(Range), N, nullptr);
- Builder.markChild(N, syntax::NodeRole::SimpleDeclaration_declarator);
+ if (!Range.getBegin().isValid()) {
+ Builder.markChild(new (allocator()) syntax::DeclaratorList,
+ syntax::NodeRole::Declarators);
+ Builder.foldNode(Builder.getDeclarationRange(D),
+ new (allocator()) syntax::SimpleDeclaration, D);
+ return true;
}
- if (Builder.isResponsibleForCreatingDeclaration(D)) {
- Builder.foldNode(Builder.getDeclarationRange(D),
+ auto *N = new (allocator()) syntax::SimpleDeclarator;
+ Builder.foldNode(Builder.getRange(Range), N, nullptr);
+ Builder.markChild(N, syntax::NodeRole::ListElement);
+
+ if (!Builder.isResponsibleForCreatingDeclaration(D)) {
+ // If this is not the last declarator in the declaration we expect a
+ // delimiter after it.
+ const auto *DelimiterToken = std::next(Builder.findToken(Range.getEnd()));
+ if (DelimiterToken->kind() == clang::tok::TokenKind::comma)
+ Builder.markChildToken(DelimiterToken, syntax::NodeRole::ListDelimiter);
+ } else {
+ auto *DL = new (allocator()) syntax::DeclaratorList;
+ auto DeclarationRange = Builder.getDeclarationRange(D);
+ Builder.foldList(DeclarationRange, DL, nullptr);
+
+ Builder.markChild(DL, syntax::NodeRole::Declarators);
+ Builder.foldNode(DeclarationRange,
new (allocator()) syntax::SimpleDeclaration, D);
}
return true;
}
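+
+  // Shape sketch (illustrative): for `int a, b;` the SimpleDeclaration now
+  // carries a DeclaratorList child in role Declarators whose elements are the
+  // declarators `a` and `b`, separated by a `,` ListDelimiter.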
/// Returns the range of the built node.
- syntax::TrailingReturnType *BuildTrailingReturn(FunctionProtoTypeLoc L) {
+ syntax::TrailingReturnType *buildTrailingReturn(FunctionProtoTypeLoc L) {
assert(L.getTypePtr()->hasTrailingReturn());
auto ReturnedType = L.getReturnLoc();
// Build node for the declarator, if any.
- auto ReturnDeclaratorRange =
- getDeclaratorRange(this->Builder.sourceManager(), ReturnedType,
- /*Name=*/SourceLocation(),
- /*Initializer=*/SourceLocation());
+ auto ReturnDeclaratorRange = SourceRange(GetStartLoc().Visit(ReturnedType),
+ ReturnedType.getEndLoc());
syntax::SimpleDeclarator *ReturnDeclarator = nullptr;
if (ReturnDeclaratorRange.isValid()) {
ReturnDeclarator = new (allocator()) syntax::SimpleDeclarator;
@@ -1238,8 +1603,7 @@ private:
auto Tokens = llvm::makeArrayRef(Arrow, Return.end());
Builder.markChildToken(Arrow, syntax::NodeRole::ArrowToken);
if (ReturnDeclarator)
- Builder.markChild(ReturnDeclarator,
- syntax::NodeRole::TrailingReturnType_declarator);
+ Builder.markChild(ReturnDeclarator, syntax::NodeRole::Declarator);
auto *R = new (allocator()) syntax::TrailingReturnType;
Builder.foldNode(Tokens, R, L);
return R;
@@ -1253,9 +1617,7 @@ private:
assert(TemplateKW && TemplateKW->kind() == tok::kw_template);
Builder.markChildToken(ExternKW, syntax::NodeRole::ExternKeyword);
Builder.markChildToken(TemplateKW, syntax::NodeRole::IntroducerKeyword);
- Builder.markChild(
- InnerDeclaration,
- syntax::NodeRole::ExplicitTemplateInstantiation_declaration);
+ Builder.markChild(InnerDeclaration, syntax::NodeRole::Declaration);
Builder.foldNode(
Range, new (allocator()) syntax::ExplicitTemplateInstantiation, From);
}
@@ -1268,7 +1630,7 @@ private:
auto *N = new (allocator()) syntax::TemplateDeclaration;
Builder.foldNode(Range, N, From);
- Builder.markChild(N, syntax::NodeRole::TemplateDeclaration_declaration);
+ Builder.markChild(N, syntax::NodeRole::Declaration);
return N;
}
@@ -1306,6 +1668,11 @@ void syntax::TreeBuilder::markChild(ASTPtr N, NodeRole R) {
assert(SN != nullptr);
setRole(SN, R);
}
+void syntax::TreeBuilder::markChild(NestedNameSpecifierLoc NNSLoc, NodeRole R) {
+ auto *SN = Mapping.find(NNSLoc);
+ assert(SN != nullptr);
+ setRole(SN, R);
+}
void syntax::TreeBuilder::markStmtChild(Stmt *Child, NodeRole Role) {
if (!Child)
@@ -1315,7 +1682,7 @@ void syntax::TreeBuilder::markStmtChild(Stmt *Child, NodeRole Role) {
if (Expr *ChildExpr = dyn_cast<Expr>(Child)) {
    // This is an expression in a statement position; consume the trailing
    // semicolon and form an 'ExpressionStatement' node.
- markExprChild(ChildExpr, NodeRole::ExpressionStatement_expression);
+ markExprChild(ChildExpr, NodeRole::Expression);
ChildNode = new (allocator()) syntax::ExpressionStatement;
// (!) 'getStmtRange()' ensures this covers a trailing semicolon.
Pending.foldChildren(Arena, getStmtRange(Child), ChildNode);
@@ -1329,7 +1696,7 @@ void syntax::TreeBuilder::markStmtChild(Stmt *Child, NodeRole Role) {
void syntax::TreeBuilder::markExprChild(Expr *Child, NodeRole Role) {
if (!Child)
return;
- Child = Child->IgnoreImplicit();
+ Child = IgnoreImplicit(Child);
syntax::Tree *ChildNode = Mapping.find(Child);
assert(ChildNode != nullptr);
@@ -1339,14 +1706,14 @@ void syntax::TreeBuilder::markExprChild(Expr *Child, NodeRole Role) {
const syntax::Token *syntax::TreeBuilder::findToken(SourceLocation L) const {
if (L.isInvalid())
return nullptr;
- auto It = LocationToToken.find(L.getRawEncoding());
+ auto It = LocationToToken.find(L);
assert(It != LocationToToken.end());
return It->second;
}
-syntax::TranslationUnit *
-syntax::buildSyntaxTree(Arena &A, const TranslationUnitDecl &TU) {
+syntax::TranslationUnit *syntax::buildSyntaxTree(Arena &A,
+ ASTContext &Context) {
TreeBuilder Builder(A);
- BuildTreeVisitor(TU.getASTContext(), Builder).TraverseAST(TU.getASTContext());
+ BuildTreeVisitor(Context, Builder).TraverseAST(Context);
return std::move(Builder).finalize();
}
diff --git a/clang/lib/Tooling/Syntax/ComputeReplacements.cpp b/clang/lib/Tooling/Syntax/ComputeReplacements.cpp
index 30b3ee17d092..31e1a40c74b6 100644
--- a/clang/lib/Tooling/Syntax/ComputeReplacements.cpp
+++ b/clang/lib/Tooling/Syntax/ComputeReplacements.cpp
@@ -32,13 +32,14 @@ void enumerateTokenSpans(const syntax::Tree *Root, ProcessTokensFn Callback) {
private:
void process(const syntax::Node *N) {
if (auto *T = dyn_cast<syntax::Tree>(N)) {
- for (auto *C = T->firstChild(); C != nullptr; C = C->nextSibling())
+ for (const auto *C = T->getFirstChild(); C != nullptr;
+ C = C->getNextSibling())
process(C);
return;
}
auto *L = cast<syntax::Leaf>(N);
- if (SpanEnd == L->token() && SpanIsOriginal == L->isOriginal()) {
+ if (SpanEnd == L->getToken() && SpanIsOriginal == L->isOriginal()) {
// Extend the current span.
++SpanEnd;
return;
@@ -47,7 +48,7 @@ void enumerateTokenSpans(const syntax::Tree *Root, ProcessTokensFn Callback) {
if (SpanBegin)
Callback(llvm::makeArrayRef(SpanBegin, SpanEnd), SpanIsOriginal);
// Start recording a new span.
- SpanBegin = L->token();
+ SpanBegin = L->getToken();
SpanEnd = SpanBegin + 1;
SpanIsOriginal = L->isOriginal();
}
@@ -63,8 +64,8 @@ void enumerateTokenSpans(const syntax::Tree *Root, ProcessTokensFn Callback) {
syntax::FileRange rangeOfExpanded(const syntax::Arena &A,
llvm::ArrayRef<syntax::Token> Expanded) {
- auto &Buffer = A.tokenBuffer();
- auto &SM = A.sourceManager();
+ const auto &Buffer = A.getTokenBuffer();
+ const auto &SM = A.getSourceManager();
// Check that \p Expanded actually points into expanded tokens.
assert(Buffer.expandedTokens().begin() <= Expanded.begin());
@@ -84,8 +85,8 @@ syntax::FileRange rangeOfExpanded(const syntax::Arena &A,
tooling::Replacements
syntax::computeReplacements(const syntax::Arena &A,
const syntax::TranslationUnit &TU) {
- auto &Buffer = A.tokenBuffer();
- auto &SM = A.sourceManager();
+ const auto &Buffer = A.getTokenBuffer();
+ const auto &SM = A.getSourceManager();
tooling::Replacements Replacements;
// Text inserted by the replacement we are building now.
diff --git a/clang/lib/Tooling/Syntax/Mutations.cpp b/clang/lib/Tooling/Syntax/Mutations.cpp
index 24048b297a11..f8a652219b22 100644
--- a/clang/lib/Tooling/Syntax/Mutations.cpp
+++ b/clang/lib/Tooling/Syntax/Mutations.cpp
@@ -30,14 +30,17 @@ public:
/// Add a new node with a specified role.
static void addAfter(syntax::Node *Anchor, syntax::Node *New, NodeRole Role) {
assert(Anchor != nullptr);
+ assert(Anchor->Parent != nullptr);
assert(New->Parent == nullptr);
assert(New->NextSibling == nullptr);
- assert(!New->isDetached());
+ assert(New->PreviousSibling == nullptr);
+ assert(New->isDetached());
assert(Role != NodeRole::Detached);
New->setRole(Role);
- auto *P = Anchor->parent();
- P->replaceChildRangeLowLevel(Anchor, Anchor, New);
+ auto *P = Anchor->getParent();
+ P->replaceChildRangeLowLevel(Anchor->getNextSibling(),
+ Anchor->getNextSibling(), New);
P->assertInvariants();
}
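+
+  // Usage sketch (illustrative): with siblings `A B`, addAfter(A, N, Role)
+  // replaces the empty range starting at A's next sibling with N, rewiring
+  // the children to `A N B`.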
@@ -49,43 +52,36 @@ public:
assert(Old->canModify());
assert(New->Parent == nullptr);
assert(New->NextSibling == nullptr);
+ assert(New->PreviousSibling == nullptr);
assert(New->isDetached());
New->Role = Old->Role;
- auto *P = Old->parent();
- P->replaceChildRangeLowLevel(findPrevious(Old), Old->nextSibling(), New);
+ auto *P = Old->getParent();
+ P->replaceChildRangeLowLevel(Old, Old->getNextSibling(), New);
P->assertInvariants();
}
/// Completely remove the node from its parent.
static void remove(syntax::Node *N) {
- auto *P = N->parent();
- P->replaceChildRangeLowLevel(findPrevious(N), N->nextSibling(),
+ assert(N != nullptr);
+ assert(N->Parent != nullptr);
+ assert(N->canModify());
+
+ auto *P = N->getParent();
+ P->replaceChildRangeLowLevel(N, N->getNextSibling(),
/*New=*/nullptr);
P->assertInvariants();
N->assertInvariants();
}
-
-private:
- static syntax::Node *findPrevious(syntax::Node *N) {
- if (N->parent()->firstChild() == N)
- return nullptr;
- for (syntax::Node *C = N->parent()->firstChild(); C != nullptr;
- C = C->nextSibling()) {
- if (C->nextSibling() == N)
- return C;
- }
- llvm_unreachable("could not find a child node");
- }
};
void syntax::removeStatement(syntax::Arena &A, syntax::Statement *S) {
assert(S);
assert(S->canModify());
- if (isa<CompoundStatement>(S->parent())) {
+ if (isa<CompoundStatement>(S->getParent())) {
// A child of CompoundStatement can just be safely removed.
MutationsImpl::remove(S);
return;
diff --git a/clang/lib/Tooling/Syntax/Nodes.cpp b/clang/lib/Tooling/Syntax/Nodes.cpp
index 2435ae0a91dd..fc6f8ef1a82c 100644
--- a/clang/lib/Tooling/Syntax/Nodes.cpp
+++ b/clang/lib/Tooling/Syntax/Nodes.cpp
@@ -10,121 +10,17 @@
using namespace clang;
-llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
+raw_ostream &syntax::operator<<(raw_ostream &OS, NodeKind K) {
switch (K) {
- case NodeKind::Leaf:
- return OS << "Leaf";
- case NodeKind::TranslationUnit:
- return OS << "TranslationUnit";
- case NodeKind::UnknownExpression:
- return OS << "UnknownExpression";
- case NodeKind::ParenExpression:
- return OS << "ParenExpression";
- case NodeKind::IntegerLiteralExpression:
- return OS << "IntegerLiteralExpression";
- case NodeKind::CharacterLiteralExpression:
- return OS << "CharacterLiteralExpression";
- case NodeKind::FloatingLiteralExpression:
- return OS << "FloatingLiteralExpression";
- case NodeKind::StringLiteralExpression:
- return OS << "StringLiteralExpression";
- case NodeKind::BoolLiteralExpression:
- return OS << "BoolLiteralExpression";
- case NodeKind::CxxNullPtrExpression:
- return OS << "CxxNullPtrExpression";
- case NodeKind::IntegerUserDefinedLiteralExpression:
- return OS << "IntegerUserDefinedLiteralExpression";
- case NodeKind::FloatUserDefinedLiteralExpression:
- return OS << "FloatUserDefinedLiteralExpression";
- case NodeKind::CharUserDefinedLiteralExpression:
- return OS << "CharUserDefinedLiteralExpression";
- case NodeKind::StringUserDefinedLiteralExpression:
- return OS << "StringUserDefinedLiteralExpression";
- case NodeKind::PrefixUnaryOperatorExpression:
- return OS << "PrefixUnaryOperatorExpression";
- case NodeKind::PostfixUnaryOperatorExpression:
- return OS << "PostfixUnaryOperatorExpression";
- case NodeKind::BinaryOperatorExpression:
- return OS << "BinaryOperatorExpression";
- case NodeKind::UnqualifiedId:
- return OS << "UnqualifiedId";
- case NodeKind::IdExpression:
- return OS << "IdExpression";
- case NodeKind::UnknownStatement:
- return OS << "UnknownStatement";
- case NodeKind::DeclarationStatement:
- return OS << "DeclarationStatement";
- case NodeKind::EmptyStatement:
- return OS << "EmptyStatement";
- case NodeKind::SwitchStatement:
- return OS << "SwitchStatement";
- case NodeKind::CaseStatement:
- return OS << "CaseStatement";
- case NodeKind::DefaultStatement:
- return OS << "DefaultStatement";
- case NodeKind::IfStatement:
- return OS << "IfStatement";
- case NodeKind::ForStatement:
- return OS << "ForStatement";
- case NodeKind::WhileStatement:
- return OS << "WhileStatement";
- case NodeKind::ContinueStatement:
- return OS << "ContinueStatement";
- case NodeKind::BreakStatement:
- return OS << "BreakStatement";
- case NodeKind::ReturnStatement:
- return OS << "ReturnStatement";
- case NodeKind::RangeBasedForStatement:
- return OS << "RangeBasedForStatement";
- case NodeKind::ExpressionStatement:
- return OS << "ExpressionStatement";
- case NodeKind::CompoundStatement:
- return OS << "CompoundStatement";
- case NodeKind::UnknownDeclaration:
- return OS << "UnknownDeclaration";
- case NodeKind::EmptyDeclaration:
- return OS << "EmptyDeclaration";
- case NodeKind::StaticAssertDeclaration:
- return OS << "StaticAssertDeclaration";
- case NodeKind::LinkageSpecificationDeclaration:
- return OS << "LinkageSpecificationDeclaration";
- case NodeKind::SimpleDeclaration:
- return OS << "SimpleDeclaration";
- case NodeKind::TemplateDeclaration:
- return OS << "TemplateDeclaration";
- case NodeKind::ExplicitTemplateInstantiation:
- return OS << "ExplicitTemplateInstantiation";
- case NodeKind::NamespaceDefinition:
- return OS << "NamespaceDefinition";
- case NodeKind::NamespaceAliasDefinition:
- return OS << "NamespaceAliasDefinition";
- case NodeKind::UsingNamespaceDirective:
- return OS << "UsingNamespaceDirective";
- case NodeKind::UsingDeclaration:
- return OS << "UsingDeclaration";
- case NodeKind::TypeAliasDeclaration:
- return OS << "TypeAliasDeclaration";
- case NodeKind::SimpleDeclarator:
- return OS << "SimpleDeclarator";
- case NodeKind::ParenDeclarator:
- return OS << "ParenDeclarator";
- case NodeKind::ArraySubscript:
- return OS << "ArraySubscript";
- case NodeKind::TrailingReturnType:
- return OS << "TrailingReturnType";
- case NodeKind::ParametersAndQualifiers:
- return OS << "ParametersAndQualifiers";
- case NodeKind::MemberPointer:
- return OS << "MemberPointer";
- case NodeKind::NameSpecifier:
- return OS << "NameSpecifier";
- case NodeKind::NestedNameSpecifier:
- return OS << "NestedNameSpecifier";
+#define CONCRETE_NODE(Kind, Parent) \
+ case NodeKind::Kind: \
+ return OS << #Kind;
+#include "clang/Tooling/Syntax/Nodes.inc"
}
llvm_unreachable("unknown node kind");
}
-llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
+raw_ostream &syntax::operator<<(raw_ostream &OS, NodeRole R) {
switch (R) {
case syntax::NodeRole::Detached:
return OS << "Detached";
@@ -142,384 +38,404 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
return OS << "ArrowToken";
case syntax::NodeRole::ExternKeyword:
return OS << "ExternKeyword";
+ case syntax::NodeRole::TemplateKeyword:
+ return OS << "TemplateKeyword";
case syntax::NodeRole::BodyStatement:
return OS << "BodyStatement";
- case syntax::NodeRole::CaseStatement_value:
- return OS << "CaseStatement_value";
- case syntax::NodeRole::IfStatement_thenStatement:
- return OS << "IfStatement_thenStatement";
- case syntax::NodeRole::IfStatement_elseKeyword:
- return OS << "IfStatement_elseKeyword";
- case syntax::NodeRole::IfStatement_elseStatement:
- return OS << "IfStatement_elseStatement";
- case syntax::NodeRole::OperatorExpression_operatorToken:
- return OS << "OperatorExpression_operatorToken";
- case syntax::NodeRole::UnaryOperatorExpression_operand:
- return OS << "UnaryOperatorExpression_operand";
- case syntax::NodeRole::BinaryOperatorExpression_leftHandSide:
- return OS << "BinaryOperatorExpression_leftHandSide";
- case syntax::NodeRole::BinaryOperatorExpression_rightHandSide:
- return OS << "BinaryOperatorExpression_rightHandSide";
- case syntax::NodeRole::ReturnStatement_value:
- return OS << "ReturnStatement_value";
- case syntax::NodeRole::ExpressionStatement_expression:
- return OS << "ExpressionStatement_expression";
- case syntax::NodeRole::CompoundStatement_statement:
- return OS << "CompoundStatement_statement";
- case syntax::NodeRole::StaticAssertDeclaration_condition:
- return OS << "StaticAssertDeclaration_condition";
- case syntax::NodeRole::StaticAssertDeclaration_message:
- return OS << "StaticAssertDeclaration_message";
- case syntax::NodeRole::SimpleDeclaration_declarator:
- return OS << "SimpleDeclaration_declarator";
- case syntax::NodeRole::TemplateDeclaration_declaration:
- return OS << "TemplateDeclaration_declaration";
- case syntax::NodeRole::ExplicitTemplateInstantiation_declaration:
- return OS << "ExplicitTemplateInstantiation_declaration";
- case syntax::NodeRole::ArraySubscript_sizeExpression:
- return OS << "ArraySubscript_sizeExpression";
- case syntax::NodeRole::TrailingReturnType_declarator:
- return OS << "TrailingReturnType_declarator";
- case syntax::NodeRole::ParametersAndQualifiers_parameter:
- return OS << "ParametersAndQualifiers_parameter";
- case syntax::NodeRole::ParametersAndQualifiers_trailingReturn:
- return OS << "ParametersAndQualifiers_trailingReturn";
- case syntax::NodeRole::IdExpression_id:
- return OS << "IdExpression_id";
- case syntax::NodeRole::IdExpression_qualifier:
- return OS << "IdExpression_qualifier";
- case syntax::NodeRole::NestedNameSpecifier_specifier:
- return OS << "NestedNameSpecifier_specifier";
- case syntax::NodeRole::ParenExpression_subExpression:
- return OS << "ParenExpression_subExpression";
+ case syntax::NodeRole::ListElement:
+ return OS << "ListElement";
+ case syntax::NodeRole::ListDelimiter:
+ return OS << "ListDelimiter";
+ case syntax::NodeRole::CaseValue:
+ return OS << "CaseValue";
+ case syntax::NodeRole::ReturnValue:
+ return OS << "ReturnValue";
+ case syntax::NodeRole::ThenStatement:
+ return OS << "ThenStatement";
+ case syntax::NodeRole::ElseKeyword:
+ return OS << "ElseKeyword";
+ case syntax::NodeRole::ElseStatement:
+ return OS << "ElseStatement";
+ case syntax::NodeRole::OperatorToken:
+ return OS << "OperatorToken";
+ case syntax::NodeRole::Operand:
+ return OS << "Operand";
+ case syntax::NodeRole::LeftHandSide:
+ return OS << "LeftHandSide";
+ case syntax::NodeRole::RightHandSide:
+ return OS << "RightHandSide";
+ case syntax::NodeRole::Expression:
+ return OS << "Expression";
+ case syntax::NodeRole::Statement:
+ return OS << "Statement";
+ case syntax::NodeRole::Condition:
+ return OS << "Condition";
+ case syntax::NodeRole::Message:
+ return OS << "Message";
+ case syntax::NodeRole::Declarator:
+ return OS << "Declarator";
+ case syntax::NodeRole::Declaration:
+ return OS << "Declaration";
+ case syntax::NodeRole::Size:
+ return OS << "Size";
+ case syntax::NodeRole::Parameters:
+ return OS << "Parameters";
+ case syntax::NodeRole::TrailingReturn:
+ return OS << "TrailingReturn";
+ case syntax::NodeRole::UnqualifiedId:
+ return OS << "UnqualifiedId";
+ case syntax::NodeRole::Qualifier:
+ return OS << "Qualifier";
+ case syntax::NodeRole::SubExpression:
+ return OS << "SubExpression";
+ case syntax::NodeRole::Object:
+ return OS << "Object";
+ case syntax::NodeRole::AccessToken:
+ return OS << "AccessToken";
+ case syntax::NodeRole::Member:
+ return OS << "Member";
+ case syntax::NodeRole::Callee:
+ return OS << "Callee";
+ case syntax::NodeRole::Arguments:
+ return OS << "Arguments";
+ case syntax::NodeRole::Declarators:
+ return OS << "Declarators";
}
llvm_unreachable("invalid role");
}
-std::vector<syntax::NameSpecifier *> syntax::NestedNameSpecifier::specifiers() {
+// We could use an iterator over the list to avoid the memory cost of a
+// temporary vector.
+std::vector<syntax::NameSpecifier *>
+syntax::NestedNameSpecifier::getSpecifiers() {
+ auto SpecifiersAsNodes = getElementsAsNodes();
std::vector<syntax::NameSpecifier *> Children;
- for (auto *C = firstChild(); C; C = C->nextSibling()) {
- assert(C->role() == syntax::NodeRole::NestedNameSpecifier_specifier);
- Children.push_back(llvm::cast<syntax::NameSpecifier>(C));
+ for (const auto &Element : SpecifiersAsNodes) {
+ Children.push_back(llvm::cast<syntax::NameSpecifier>(Element));
}
return Children;
}
-syntax::NestedNameSpecifier *syntax::IdExpression::qualifier() {
- return llvm::cast_or_null<syntax::NestedNameSpecifier>(
- findChild(syntax::NodeRole::IdExpression_qualifier));
-}
-
-syntax::UnqualifiedId *syntax::IdExpression::unqualifiedId() {
- return llvm::cast_or_null<syntax::UnqualifiedId>(
- findChild(syntax::NodeRole::IdExpression_id));
-}
-
-syntax::Leaf *syntax::ParenExpression::openParen() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::OpenParen));
-}
-
-syntax::Expression *syntax::ParenExpression::subExpression() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::ParenExpression_subExpression));
-}
-
-syntax::Leaf *syntax::ParenExpression::closeParen() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::CloseParen));
-}
-
-syntax::Leaf *syntax::IntegerLiteralExpression::literalToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::LiteralToken));
+std::vector<syntax::List::ElementAndDelimiter<syntax::NameSpecifier>>
+syntax::NestedNameSpecifier::getSpecifiersAndDoubleColons() {
+ auto SpecifiersAsNodesAndDoubleColons = getElementsAsNodesAndDelimiters();
+ std::vector<syntax::List::ElementAndDelimiter<syntax::NameSpecifier>>
+ Children;
+ for (const auto &SpecifierAndDoubleColon : SpecifiersAsNodesAndDoubleColons) {
+ Children.push_back(
+ {llvm::cast<syntax::NameSpecifier>(SpecifierAndDoubleColon.element),
+ SpecifierAndDoubleColon.delimiter});
+ }
+ return Children;
}
-syntax::Leaf *syntax::CharacterLiteralExpression::literalToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::LiteralToken));
+std::vector<syntax::Expression *> syntax::CallArguments::getArguments() {
+ auto ArgumentsAsNodes = getElementsAsNodes();
+ std::vector<syntax::Expression *> Children;
+ for (const auto &ArgumentAsNode : ArgumentsAsNodes) {
+ Children.push_back(llvm::cast<syntax::Expression>(ArgumentAsNode));
+ }
+ return Children;
}
-syntax::Leaf *syntax::FloatingLiteralExpression::literalToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::LiteralToken));
+std::vector<syntax::List::ElementAndDelimiter<syntax::Expression>>
+syntax::CallArguments::getArgumentsAndCommas() {
+ auto ArgumentsAsNodesAndCommas = getElementsAsNodesAndDelimiters();
+ std::vector<syntax::List::ElementAndDelimiter<syntax::Expression>> Children;
+ for (const auto &ArgumentAsNodeAndComma : ArgumentsAsNodesAndCommas) {
+ Children.push_back(
+ {llvm::cast<syntax::Expression>(ArgumentAsNodeAndComma.element),
+ ArgumentAsNodeAndComma.delimiter});
+ }
+ return Children;
}
-syntax::Leaf *syntax::StringLiteralExpression::literalToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::LiteralToken));
+std::vector<syntax::SimpleDeclaration *>
+syntax::ParameterDeclarationList::getParameterDeclarations() {
+ auto ParametersAsNodes = getElementsAsNodes();
+ std::vector<syntax::SimpleDeclaration *> Children;
+ for (const auto &ParameterAsNode : ParametersAsNodes) {
+ Children.push_back(llvm::cast<syntax::SimpleDeclaration>(ParameterAsNode));
+ }
+ return Children;
}
-syntax::Leaf *syntax::BoolLiteralExpression::literalToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::LiteralToken));
+std::vector<syntax::List::ElementAndDelimiter<syntax::SimpleDeclaration>>
+syntax::ParameterDeclarationList::getParametersAndCommas() {
+ auto ParametersAsNodesAndCommas = getElementsAsNodesAndDelimiters();
+ std::vector<syntax::List::ElementAndDelimiter<syntax::SimpleDeclaration>>
+ Children;
+ for (const auto &ParameterAsNodeAndComma : ParametersAsNodesAndCommas) {
+ Children.push_back(
+ {llvm::cast<syntax::SimpleDeclaration>(ParameterAsNodeAndComma.element),
+ ParameterAsNodeAndComma.delimiter});
+ }
+ return Children;
}
-syntax::Leaf *syntax::CxxNullPtrExpression::nullPtrKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::LiteralToken));
+std::vector<syntax::SimpleDeclarator *>
+syntax::DeclaratorList::getDeclarators() {
+ auto DeclaratorsAsNodes = getElementsAsNodes();
+ std::vector<syntax::SimpleDeclarator *> Children;
+ for (const auto &DeclaratorAsNode : DeclaratorsAsNodes) {
+ Children.push_back(llvm::cast<syntax::SimpleDeclarator>(DeclaratorAsNode));
+ }
+ return Children;
}
-syntax::Leaf *syntax::UserDefinedLiteralExpression::literalToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::LiteralToken));
+std::vector<syntax::List::ElementAndDelimiter<syntax::SimpleDeclarator>>
+syntax::DeclaratorList::getDeclaratorsAndCommas() {
+ auto DeclaratorsAsNodesAndCommas = getElementsAsNodesAndDelimiters();
+ std::vector<syntax::List::ElementAndDelimiter<syntax::SimpleDeclarator>>
+ Children;
+ for (const auto &DeclaratorAsNodeAndComma : DeclaratorsAsNodesAndCommas) {
+ Children.push_back(
+ {llvm::cast<syntax::SimpleDeclarator>(DeclaratorAsNodeAndComma.element),
+ DeclaratorAsNodeAndComma.delimiter});
+ }
+ return Children;
}
-syntax::Expression *syntax::BinaryOperatorExpression::lhs() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::BinaryOperatorExpression_leftHandSide));
+syntax::Expression *syntax::BinaryOperatorExpression::getLhs() {
+ return cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::LeftHandSide));
}
-syntax::Leaf *syntax::UnaryOperatorExpression::operatorToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::OperatorExpression_operatorToken));
+syntax::Leaf *syntax::UnaryOperatorExpression::getOperatorToken() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::OperatorToken));
}
-syntax::Expression *syntax::UnaryOperatorExpression::operand() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::UnaryOperatorExpression_operand));
+syntax::Expression *syntax::UnaryOperatorExpression::getOperand() {
+ return cast_or_null<syntax::Expression>(findChild(syntax::NodeRole::Operand));
}
-syntax::Leaf *syntax::BinaryOperatorExpression::operatorToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::OperatorExpression_operatorToken));
+syntax::Leaf *syntax::BinaryOperatorExpression::getOperatorToken() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::OperatorToken));
}
-syntax::Expression *syntax::BinaryOperatorExpression::rhs() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::BinaryOperatorExpression_rightHandSide));
+syntax::Expression *syntax::BinaryOperatorExpression::getRhs() {
+ return cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::RightHandSide));
}
-syntax::Leaf *syntax::SwitchStatement::switchKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::SwitchStatement::getSwitchKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Statement *syntax::SwitchStatement::body() {
- return llvm::cast_or_null<syntax::Statement>(
+syntax::Statement *syntax::SwitchStatement::getBody() {
+ return cast_or_null<syntax::Statement>(
findChild(syntax::NodeRole::BodyStatement));
}
-syntax::Leaf *syntax::CaseStatement::caseKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::CaseStatement::getCaseKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Expression *syntax::CaseStatement::value() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::CaseStatement_value));
+syntax::Expression *syntax::CaseStatement::getCaseValue() {
+ return cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::CaseValue));
}
-syntax::Statement *syntax::CaseStatement::body() {
- return llvm::cast_or_null<syntax::Statement>(
+syntax::Statement *syntax::CaseStatement::getBody() {
+ return cast_or_null<syntax::Statement>(
findChild(syntax::NodeRole::BodyStatement));
}
-syntax::Leaf *syntax::DefaultStatement::defaultKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::DefaultStatement::getDefaultKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Statement *syntax::DefaultStatement::body() {
- return llvm::cast_or_null<syntax::Statement>(
+syntax::Statement *syntax::DefaultStatement::getBody() {
+ return cast_or_null<syntax::Statement>(
findChild(syntax::NodeRole::BodyStatement));
}
-syntax::Leaf *syntax::IfStatement::ifKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::IfStatement::getIfKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Statement *syntax::IfStatement::thenStatement() {
- return llvm::cast_or_null<syntax::Statement>(
- findChild(syntax::NodeRole::IfStatement_thenStatement));
+syntax::Statement *syntax::IfStatement::getThenStatement() {
+ return cast_or_null<syntax::Statement>(
+ findChild(syntax::NodeRole::ThenStatement));
}
-syntax::Leaf *syntax::IfStatement::elseKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::IfStatement_elseKeyword));
+syntax::Leaf *syntax::IfStatement::getElseKeyword() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::ElseKeyword));
}
-syntax::Statement *syntax::IfStatement::elseStatement() {
- return llvm::cast_or_null<syntax::Statement>(
- findChild(syntax::NodeRole::IfStatement_elseStatement));
+syntax::Statement *syntax::IfStatement::getElseStatement() {
+ return cast_or_null<syntax::Statement>(
+ findChild(syntax::NodeRole::ElseStatement));
}
-syntax::Leaf *syntax::ForStatement::forKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::ForStatement::getForKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Statement *syntax::ForStatement::body() {
- return llvm::cast_or_null<syntax::Statement>(
+syntax::Statement *syntax::ForStatement::getBody() {
+ return cast_or_null<syntax::Statement>(
findChild(syntax::NodeRole::BodyStatement));
}
-syntax::Leaf *syntax::WhileStatement::whileKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::WhileStatement::getWhileKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Statement *syntax::WhileStatement::body() {
- return llvm::cast_or_null<syntax::Statement>(
+syntax::Statement *syntax::WhileStatement::getBody() {
+ return cast_or_null<syntax::Statement>(
findChild(syntax::NodeRole::BodyStatement));
}
-syntax::Leaf *syntax::ContinueStatement::continueKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::ContinueStatement::getContinueKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Leaf *syntax::BreakStatement::breakKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::BreakStatement::getBreakKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Leaf *syntax::ReturnStatement::returnKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::ReturnStatement::getReturnKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Expression *syntax::ReturnStatement::value() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::ReturnStatement_value));
+syntax::Expression *syntax::ReturnStatement::getReturnValue() {
+ return cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::ReturnValue));
}
-syntax::Leaf *syntax::RangeBasedForStatement::forKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::RangeBasedForStatement::getForKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Statement *syntax::RangeBasedForStatement::body() {
- return llvm::cast_or_null<syntax::Statement>(
+syntax::Statement *syntax::RangeBasedForStatement::getBody() {
+ return cast_or_null<syntax::Statement>(
findChild(syntax::NodeRole::BodyStatement));
}
-syntax::Expression *syntax::ExpressionStatement::expression() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::ExpressionStatement_expression));
+syntax::Expression *syntax::ExpressionStatement::getExpression() {
+ return cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::Expression));
}
-syntax::Leaf *syntax::CompoundStatement::lbrace() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::OpenParen));
+syntax::Leaf *syntax::CompoundStatement::getLbrace() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::OpenParen));
}
-std::vector<syntax::Statement *> syntax::CompoundStatement::statements() {
+std::vector<syntax::Statement *> syntax::CompoundStatement::getStatements() {
std::vector<syntax::Statement *> Children;
- for (auto *C = firstChild(); C; C = C->nextSibling()) {
- assert(C->role() == syntax::NodeRole::CompoundStatement_statement);
- Children.push_back(llvm::cast<syntax::Statement>(C));
+ for (auto *C = getFirstChild(); C; C = C->getNextSibling()) {
+ assert(C->getRole() == syntax::NodeRole::Statement);
+ Children.push_back(cast<syntax::Statement>(C));
}
return Children;
}
-syntax::Leaf *syntax::CompoundStatement::rbrace() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::CloseParen));
+syntax::Leaf *syntax::CompoundStatement::getRbrace() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::CloseParen));
}
-syntax::Expression *syntax::StaticAssertDeclaration::condition() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::StaticAssertDeclaration_condition));
+syntax::Expression *syntax::StaticAssertDeclaration::getCondition() {
+ return cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::Condition));
}
-syntax::Expression *syntax::StaticAssertDeclaration::message() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::StaticAssertDeclaration_message));
+syntax::Expression *syntax::StaticAssertDeclaration::getMessage() {
+ return cast_or_null<syntax::Expression>(findChild(syntax::NodeRole::Message));
}
std::vector<syntax::SimpleDeclarator *>
-syntax::SimpleDeclaration::declarators() {
+syntax::SimpleDeclaration::getDeclarators() {
std::vector<syntax::SimpleDeclarator *> Children;
- for (auto *C = firstChild(); C; C = C->nextSibling()) {
- if (C->role() == syntax::NodeRole::SimpleDeclaration_declarator)
- Children.push_back(llvm::cast<syntax::SimpleDeclarator>(C));
+ for (auto *C = getFirstChild(); C; C = C->getNextSibling()) {
+ if (C->getRole() == syntax::NodeRole::Declarator)
+ Children.push_back(cast<syntax::SimpleDeclarator>(C));
}
return Children;
}
-syntax::Leaf *syntax::TemplateDeclaration::templateKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::TemplateDeclaration::getTemplateKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Declaration *syntax::TemplateDeclaration::declaration() {
- return llvm::cast_or_null<syntax::Declaration>(
- findChild(syntax::NodeRole::TemplateDeclaration_declaration));
+syntax::Declaration *syntax::TemplateDeclaration::getDeclaration() {
+ return cast_or_null<syntax::Declaration>(
+ findChild(syntax::NodeRole::Declaration));
}
-syntax::Leaf *syntax::ExplicitTemplateInstantiation::templateKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
+syntax::Leaf *syntax::ExplicitTemplateInstantiation::getTemplateKeyword() {
+ return cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
}
-syntax::Leaf *syntax::ExplicitTemplateInstantiation::externKeyword() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::ExternKeyword));
+syntax::Leaf *syntax::ExplicitTemplateInstantiation::getExternKeyword() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::ExternKeyword));
}
-syntax::Declaration *syntax::ExplicitTemplateInstantiation::declaration() {
- return llvm::cast_or_null<syntax::Declaration>(
- findChild(syntax::NodeRole::ExplicitTemplateInstantiation_declaration));
+syntax::Declaration *syntax::ExplicitTemplateInstantiation::getDeclaration() {
+ return cast_or_null<syntax::Declaration>(
+ findChild(syntax::NodeRole::Declaration));
}
-syntax::Leaf *syntax::ParenDeclarator::lparen() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::OpenParen));
+syntax::Leaf *syntax::ParenDeclarator::getLparen() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::OpenParen));
}
-syntax::Leaf *syntax::ParenDeclarator::rparen() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::CloseParen));
+syntax::Leaf *syntax::ParenDeclarator::getRparen() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::CloseParen));
}
-syntax::Leaf *syntax::ArraySubscript::lbracket() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::OpenParen));
+syntax::Leaf *syntax::ArraySubscript::getLbracket() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::OpenParen));
}
-syntax::Expression *syntax::ArraySubscript::sizeExpression() {
- return llvm::cast_or_null<syntax::Expression>(
- findChild(syntax::NodeRole::ArraySubscript_sizeExpression));
+syntax::Expression *syntax::ArraySubscript::getSize() {
+ return cast_or_null<syntax::Expression>(findChild(syntax::NodeRole::Size));
}
-syntax::Leaf *syntax::ArraySubscript::rbracket() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::CloseParen));
+syntax::Leaf *syntax::ArraySubscript::getRbracket() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::CloseParen));
}
-syntax::Leaf *syntax::TrailingReturnType::arrowToken() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::ArrowToken));
+syntax::Leaf *syntax::TrailingReturnType::getArrowToken() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::ArrowToken));
}
-syntax::SimpleDeclarator *syntax::TrailingReturnType::declarator() {
- return llvm::cast_or_null<syntax::SimpleDeclarator>(
- findChild(syntax::NodeRole::TrailingReturnType_declarator));
+syntax::SimpleDeclarator *syntax::TrailingReturnType::getDeclarator() {
+ return cast_or_null<syntax::SimpleDeclarator>(
+ findChild(syntax::NodeRole::Declarator));
}
-syntax::Leaf *syntax::ParametersAndQualifiers::lparen() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::OpenParen));
+syntax::Leaf *syntax::ParametersAndQualifiers::getLparen() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::OpenParen));
}
-std::vector<syntax::SimpleDeclaration *>
-syntax::ParametersAndQualifiers::parameters() {
- std::vector<syntax::SimpleDeclaration *> Children;
- for (auto *C = firstChild(); C; C = C->nextSibling()) {
- if (C->role() == syntax::NodeRole::ParametersAndQualifiers_parameter)
- Children.push_back(llvm::cast<syntax::SimpleDeclaration>(C));
- }
- return Children;
+syntax::ParameterDeclarationList *
+syntax::ParametersAndQualifiers::getParameters() {
+ return cast_or_null<syntax::ParameterDeclarationList>(
+ findChild(syntax::NodeRole::Parameters));
}
-syntax::Leaf *syntax::ParametersAndQualifiers::rparen() {
- return llvm::cast_or_null<syntax::Leaf>(
- findChild(syntax::NodeRole::CloseParen));
+syntax::Leaf *syntax::ParametersAndQualifiers::getRparen() {
+ return cast_or_null<syntax::Leaf>(findChild(syntax::NodeRole::CloseParen));
}
-syntax::TrailingReturnType *syntax::ParametersAndQualifiers::trailingReturn() {
- return llvm::cast_or_null<syntax::TrailingReturnType>(
- findChild(syntax::NodeRole::ParametersAndQualifiers_trailingReturn));
+syntax::TrailingReturnType *
+syntax::ParametersAndQualifiers::getTrailingReturn() {
+ return cast_or_null<syntax::TrailingReturnType>(
+ findChild(syntax::NodeRole::TrailingReturn));
}
+
+#define NODE(Kind, Parent) \
+ static_assert(sizeof(syntax::Kind) > 0, "Missing Node subclass definition");
+#include "clang/Tooling/Syntax/Nodes.inc"
diff --git a/clang/lib/Tooling/Syntax/Synthesis.cpp b/clang/lib/Tooling/Syntax/Synthesis.cpp
index aa01a34c761f..ef6492882be6 100644
--- a/clang/lib/Tooling/Syntax/Synthesis.cpp
+++ b/clang/lib/Tooling/Syntax/Synthesis.cpp
@@ -5,13 +5,15 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/TokenKinds.h"
#include "clang/Tooling/Syntax/BuildTree.h"
+#include "clang/Tooling/Syntax/Tree.h"
using namespace clang;
/// Exposes private syntax tree APIs required to implement node synthesis.
/// Should not be used for anything else.
-class syntax::FactoryImpl {
+class clang::syntax::FactoryImpl {
public:
static void setCanModify(syntax::Node *N) { N->CanModify = true; }
@@ -19,27 +21,211 @@ public:
syntax::NodeRole R) {
T->prependChildLowLevel(Child, R);
}
+ static void appendChildLowLevel(syntax::Tree *T, syntax::Node *Child,
+ syntax::NodeRole R) {
+ T->appendChildLowLevel(Child, R);
+ }
+
+ static std::pair<FileID, ArrayRef<Token>>
+ lexBuffer(syntax::Arena &A, std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ return A.lexBuffer(std::move(Buffer));
+ }
};
-clang::syntax::Leaf *syntax::createPunctuation(clang::syntax::Arena &A,
- clang::tok::TokenKind K) {
- auto Tokens = A.lexBuffer(llvm::MemoryBuffer::getMemBuffer(
- clang::tok::getPunctuatorSpelling(K)))
- .second;
+// FIXME: `createLeaf` is based on `syntax::tokenize` internally; as such, it
+// doesn't support digraphs or line continuations.
+syntax::Leaf *clang::syntax::createLeaf(syntax::Arena &A, tok::TokenKind K,
+ StringRef Spelling) {
+ auto Tokens =
+ FactoryImpl::lexBuffer(A, llvm::MemoryBuffer::getMemBufferCopy(Spelling))
+ .second;
assert(Tokens.size() == 1);
- assert(Tokens.front().kind() == K);
- auto *L = new (A.allocator()) clang::syntax::Leaf(Tokens.begin());
- FactoryImpl::setCanModify(L);
- L->assertInvariants();
- return L;
+ assert(Tokens.front().kind() == K &&
+ "spelling is not lexed into the expected kind of token");
+
+ auto *Leaf = new (A.getAllocator()) syntax::Leaf(Tokens.begin());
+ syntax::FactoryImpl::setCanModify(Leaf);
+ Leaf->assertInvariants();
+ return Leaf;
+}
+
+syntax::Leaf *clang::syntax::createLeaf(syntax::Arena &A, tok::TokenKind K) {
+ const auto *Spelling = tok::getPunctuatorSpelling(K);
+ if (!Spelling)
+ Spelling = tok::getKeywordSpelling(K);
+ assert(Spelling &&
+ "Cannot infer the spelling of the token from its token kind.");
+ return createLeaf(A, K, Spelling);
+}
+
+namespace {
+// Allocates the concrete syntax `Tree` according to its `NodeKind`.
+syntax::Tree *allocateTree(syntax::Arena &A, syntax::NodeKind Kind) {
+ switch (Kind) {
+ case syntax::NodeKind::Leaf:
+ assert(false);
+ break;
+ case syntax::NodeKind::TranslationUnit:
+ return new (A.getAllocator()) syntax::TranslationUnit;
+ case syntax::NodeKind::UnknownExpression:
+ return new (A.getAllocator()) syntax::UnknownExpression;
+ case syntax::NodeKind::ParenExpression:
+ return new (A.getAllocator()) syntax::ParenExpression;
+ case syntax::NodeKind::ThisExpression:
+ return new (A.getAllocator()) syntax::ThisExpression;
+ case syntax::NodeKind::IntegerLiteralExpression:
+ return new (A.getAllocator()) syntax::IntegerLiteralExpression;
+ case syntax::NodeKind::CharacterLiteralExpression:
+ return new (A.getAllocator()) syntax::CharacterLiteralExpression;
+ case syntax::NodeKind::FloatingLiteralExpression:
+ return new (A.getAllocator()) syntax::FloatingLiteralExpression;
+ case syntax::NodeKind::StringLiteralExpression:
+ return new (A.getAllocator()) syntax::StringLiteralExpression;
+ case syntax::NodeKind::BoolLiteralExpression:
+ return new (A.getAllocator()) syntax::BoolLiteralExpression;
+ case syntax::NodeKind::CxxNullPtrExpression:
+ return new (A.getAllocator()) syntax::CxxNullPtrExpression;
+ case syntax::NodeKind::IntegerUserDefinedLiteralExpression:
+ return new (A.getAllocator()) syntax::IntegerUserDefinedLiteralExpression;
+ case syntax::NodeKind::FloatUserDefinedLiteralExpression:
+ return new (A.getAllocator()) syntax::FloatUserDefinedLiteralExpression;
+ case syntax::NodeKind::CharUserDefinedLiteralExpression:
+ return new (A.getAllocator()) syntax::CharUserDefinedLiteralExpression;
+ case syntax::NodeKind::StringUserDefinedLiteralExpression:
+ return new (A.getAllocator()) syntax::StringUserDefinedLiteralExpression;
+ case syntax::NodeKind::PrefixUnaryOperatorExpression:
+ return new (A.getAllocator()) syntax::PrefixUnaryOperatorExpression;
+ case syntax::NodeKind::PostfixUnaryOperatorExpression:
+ return new (A.getAllocator()) syntax::PostfixUnaryOperatorExpression;
+ case syntax::NodeKind::BinaryOperatorExpression:
+ return new (A.getAllocator()) syntax::BinaryOperatorExpression;
+ case syntax::NodeKind::UnqualifiedId:
+ return new (A.getAllocator()) syntax::UnqualifiedId;
+ case syntax::NodeKind::IdExpression:
+ return new (A.getAllocator()) syntax::IdExpression;
+ case syntax::NodeKind::CallExpression:
+ return new (A.getAllocator()) syntax::CallExpression;
+ case syntax::NodeKind::UnknownStatement:
+ return new (A.getAllocator()) syntax::UnknownStatement;
+ case syntax::NodeKind::DeclarationStatement:
+ return new (A.getAllocator()) syntax::DeclarationStatement;
+ case syntax::NodeKind::EmptyStatement:
+ return new (A.getAllocator()) syntax::EmptyStatement;
+ case syntax::NodeKind::SwitchStatement:
+ return new (A.getAllocator()) syntax::SwitchStatement;
+ case syntax::NodeKind::CaseStatement:
+ return new (A.getAllocator()) syntax::CaseStatement;
+ case syntax::NodeKind::DefaultStatement:
+ return new (A.getAllocator()) syntax::DefaultStatement;
+ case syntax::NodeKind::IfStatement:
+ return new (A.getAllocator()) syntax::IfStatement;
+ case syntax::NodeKind::ForStatement:
+ return new (A.getAllocator()) syntax::ForStatement;
+ case syntax::NodeKind::WhileStatement:
+ return new (A.getAllocator()) syntax::WhileStatement;
+ case syntax::NodeKind::ContinueStatement:
+ return new (A.getAllocator()) syntax::ContinueStatement;
+ case syntax::NodeKind::BreakStatement:
+ return new (A.getAllocator()) syntax::BreakStatement;
+ case syntax::NodeKind::ReturnStatement:
+ return new (A.getAllocator()) syntax::ReturnStatement;
+ case syntax::NodeKind::RangeBasedForStatement:
+ return new (A.getAllocator()) syntax::RangeBasedForStatement;
+ case syntax::NodeKind::ExpressionStatement:
+ return new (A.getAllocator()) syntax::ExpressionStatement;
+ case syntax::NodeKind::CompoundStatement:
+ return new (A.getAllocator()) syntax::CompoundStatement;
+ case syntax::NodeKind::UnknownDeclaration:
+ return new (A.getAllocator()) syntax::UnknownDeclaration;
+ case syntax::NodeKind::EmptyDeclaration:
+ return new (A.getAllocator()) syntax::EmptyDeclaration;
+ case syntax::NodeKind::StaticAssertDeclaration:
+ return new (A.getAllocator()) syntax::StaticAssertDeclaration;
+ case syntax::NodeKind::LinkageSpecificationDeclaration:
+ return new (A.getAllocator()) syntax::LinkageSpecificationDeclaration;
+ case syntax::NodeKind::SimpleDeclaration:
+ return new (A.getAllocator()) syntax::SimpleDeclaration;
+ case syntax::NodeKind::TemplateDeclaration:
+ return new (A.getAllocator()) syntax::TemplateDeclaration;
+ case syntax::NodeKind::ExplicitTemplateInstantiation:
+ return new (A.getAllocator()) syntax::ExplicitTemplateInstantiation;
+ case syntax::NodeKind::NamespaceDefinition:
+ return new (A.getAllocator()) syntax::NamespaceDefinition;
+ case syntax::NodeKind::NamespaceAliasDefinition:
+ return new (A.getAllocator()) syntax::NamespaceAliasDefinition;
+ case syntax::NodeKind::UsingNamespaceDirective:
+ return new (A.getAllocator()) syntax::UsingNamespaceDirective;
+ case syntax::NodeKind::UsingDeclaration:
+ return new (A.getAllocator()) syntax::UsingDeclaration;
+ case syntax::NodeKind::TypeAliasDeclaration:
+ return new (A.getAllocator()) syntax::TypeAliasDeclaration;
+ case syntax::NodeKind::SimpleDeclarator:
+ return new (A.getAllocator()) syntax::SimpleDeclarator;
+ case syntax::NodeKind::ParenDeclarator:
+ return new (A.getAllocator()) syntax::ParenDeclarator;
+ case syntax::NodeKind::ArraySubscript:
+ return new (A.getAllocator()) syntax::ArraySubscript;
+ case syntax::NodeKind::TrailingReturnType:
+ return new (A.getAllocator()) syntax::TrailingReturnType;
+ case syntax::NodeKind::ParametersAndQualifiers:
+ return new (A.getAllocator()) syntax::ParametersAndQualifiers;
+ case syntax::NodeKind::MemberPointer:
+ return new (A.getAllocator()) syntax::MemberPointer;
+ case syntax::NodeKind::GlobalNameSpecifier:
+ return new (A.getAllocator()) syntax::GlobalNameSpecifier;
+ case syntax::NodeKind::DecltypeNameSpecifier:
+ return new (A.getAllocator()) syntax::DecltypeNameSpecifier;
+ case syntax::NodeKind::IdentifierNameSpecifier:
+ return new (A.getAllocator()) syntax::IdentifierNameSpecifier;
+ case syntax::NodeKind::SimpleTemplateNameSpecifier:
+ return new (A.getAllocator()) syntax::SimpleTemplateNameSpecifier;
+ case syntax::NodeKind::NestedNameSpecifier:
+ return new (A.getAllocator()) syntax::NestedNameSpecifier;
+ case syntax::NodeKind::MemberExpression:
+ return new (A.getAllocator()) syntax::MemberExpression;
+ case syntax::NodeKind::CallArguments:
+ return new (A.getAllocator()) syntax::CallArguments;
+ case syntax::NodeKind::ParameterDeclarationList:
+ return new (A.getAllocator()) syntax::ParameterDeclarationList;
+ case syntax::NodeKind::DeclaratorList:
+ return new (A.getAllocator()) syntax::DeclaratorList;
+ }
+ llvm_unreachable("unknown node kind");
+}
+} // namespace
+
+syntax::Tree *clang::syntax::createTree(
+ syntax::Arena &A,
+ ArrayRef<std::pair<syntax::Node *, syntax::NodeRole>> Children,
+ syntax::NodeKind K) {
+ auto *T = allocateTree(A, K);
+ FactoryImpl::setCanModify(T);
+ for (const auto &Child : Children)
+ FactoryImpl::appendChildLowLevel(T, Child.first, Child.second);
+
+ T->assertInvariants();
+ return T;
+}
+
+syntax::Node *clang::syntax::deepCopyExpandingMacros(syntax::Arena &A,
+ const syntax::Node *N) {
+ if (const auto *L = dyn_cast<syntax::Leaf>(N))
+ // `L->getToken()` gives us the expanded token, so we implicitly expand
+ // any macros here.
+ return createLeaf(A, L->getToken()->kind(),
+ L->getToken()->text(A.getSourceManager()));
+
+ const auto *T = cast<syntax::Tree>(N);
+ std::vector<std::pair<syntax::Node *, syntax::NodeRole>> Children;
+ for (const auto *Child = T->getFirstChild(); Child;
+ Child = Child->getNextSibling())
+ Children.push_back({deepCopyExpandingMacros(A, Child), Child->getRole()});
+
+ return createTree(A, Children, N->getKind());
}
-clang::syntax::EmptyStatement *
-syntax::createEmptyStatement(clang::syntax::Arena &A) {
- auto *S = new (A.allocator()) clang::syntax::EmptyStatement;
- FactoryImpl::setCanModify(S);
- FactoryImpl::prependChildLowLevel(S, createPunctuation(A, clang::tok::semi),
- NodeRole::Unknown);
- S->assertInvariants();
- return S;
+syntax::EmptyStatement *clang::syntax::createEmptyStatement(syntax::Arena &A) {
+ return cast<EmptyStatement>(
+ createTree(A, {{createLeaf(A, tok::semi), NodeRole::Unknown}},
+ NodeKind::EmptyStatement));
}
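// Note (illustrative sketch, not part of the patch): `createLeaf` and
// `createTree` subsume the old per-kind factories; `createEmptyStatement`
// above is now just a thin wrapper over them. Assuming an Arena `A` that
// outlives the nodes:
#include "clang/Basic/TokenKinds.h"
#include "clang/Tooling/Syntax/BuildTree.h"
clang::syntax::Tree *makeEmptyStatement(clang::syntax::Arena &A) {
  // The spelling of ';' is inferred from the token kind.
  clang::syntax::Leaf *Semi = clang::syntax::createLeaf(A, clang::tok::semi);
  // Children are attached with explicit roles, one pair per child.
  return clang::syntax::createTree(
      A, {{Semi, clang::syntax::NodeRole::Unknown}},
      clang::syntax::NodeKind::EmptyStatement);
}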
diff --git a/clang/lib/Tooling/Syntax/Tokens.cpp b/clang/lib/Tooling/Syntax/Tokens.cpp
index c6b904822b8b..234df9cb7182 100644
--- a/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -249,22 +249,7 @@ llvm::SmallVector<llvm::ArrayRef<syntax::Token>, 1>
TokenBuffer::expandedForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
if (Spelled.empty())
return {};
- assert(Spelled.front().location().isFileID());
-
- auto FID = sourceManager().getFileID(Spelled.front().location());
- auto It = Files.find(FID);
- assert(It != Files.end());
-
- const MarkedFile &File = It->second;
- // `Spelled` must be a subrange of `File.SpelledTokens`.
- assert(File.SpelledTokens.data() <= Spelled.data());
- assert(&Spelled.back() <=
- File.SpelledTokens.data() + File.SpelledTokens.size());
-#ifndef NDEBUG
- auto T1 = Spelled.back().location();
- auto T2 = File.SpelledTokens.back().location();
- assert(T1 == T2 || sourceManager().isBeforeInTranslationUnit(T1, T2));
-#endif
+ const auto &File = fileForSpelled(Spelled);
auto *FrontMapping = mappingStartingBeforeSpelled(File, &Spelled.front());
unsigned SpelledFrontI = &Spelled.front() - File.SpelledTokens.data();
@@ -395,16 +380,39 @@ TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
: LastSpelled + 1);
}
+TokenBuffer::Expansion TokenBuffer::makeExpansion(const MarkedFile &F,
+ const Mapping &M) const {
+ Expansion E;
+ E.Spelled = llvm::makeArrayRef(F.SpelledTokens.data() + M.BeginSpelled,
+ F.SpelledTokens.data() + M.EndSpelled);
+ E.Expanded = llvm::makeArrayRef(ExpandedTokens.data() + M.BeginExpanded,
+ ExpandedTokens.data() + M.EndExpanded);
+ return E;
+}
+
+const TokenBuffer::MarkedFile &
+TokenBuffer::fileForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
+ assert(!Spelled.empty());
+ assert(Spelled.front().location().isFileID() && "not a spelled token");
+ auto FileIt = Files.find(SourceMgr->getFileID(Spelled.front().location()));
+ assert(FileIt != Files.end() && "file not tracked by token buffer");
+ const auto &File = FileIt->second;
+ assert(File.SpelledTokens.data() <= Spelled.data() &&
+ Spelled.end() <=
+ (File.SpelledTokens.data() + File.SpelledTokens.size()) &&
+ "Tokens not in spelled range");
+#ifndef NDEBUG
+ auto T1 = Spelled.back().location();
+ auto T2 = File.SpelledTokens.back().location();
+ assert(T1 == T2 || sourceManager().isBeforeInTranslationUnit(T1, T2));
+#endif
+ return File;
+}
+
llvm::Optional<TokenBuffer::Expansion>
TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
assert(Spelled);
- assert(Spelled->location().isFileID() && "not a spelled token");
- auto FileIt = Files.find(SourceMgr->getFileID(Spelled->location()));
- assert(FileIt != Files.end() && "file not tracked by token buffer");
-
- auto &File = FileIt->second;
- assert(File.SpelledTokens.data() <= Spelled &&
- Spelled < (File.SpelledTokens.data() + File.SpelledTokens.size()));
+ const auto &File = fileForSpelled(*Spelled);
unsigned SpelledIndex = Spelled - File.SpelledTokens.data();
auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
@@ -412,14 +420,27 @@ TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
});
if (M == File.Mappings.end() || M->BeginSpelled != SpelledIndex)
return llvm::None;
+ return makeExpansion(File, *M);
+}
- Expansion E;
- E.Spelled = llvm::makeArrayRef(File.SpelledTokens.data() + M->BeginSpelled,
- File.SpelledTokens.data() + M->EndSpelled);
- E.Expanded = llvm::makeArrayRef(ExpandedTokens.data() + M->BeginExpanded,
- ExpandedTokens.data() + M->EndExpanded);
- return E;
+std::vector<TokenBuffer::Expansion> TokenBuffer::expansionsOverlapping(
+ llvm::ArrayRef<syntax::Token> Spelled) const {
+ if (Spelled.empty())
+ return {};
+ const auto &File = fileForSpelled(Spelled);
+
+ // Find the first overlapping range, and then copy until we stop overlapping.
+ unsigned SpelledBeginIndex = Spelled.begin() - File.SpelledTokens.data();
+ unsigned SpelledEndIndex = Spelled.end() - File.SpelledTokens.data();
+ auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
+ return M.EndSpelled <= SpelledBeginIndex;
+ });
+ std::vector<TokenBuffer::Expansion> Expansions;
+ for (; M != File.Mappings.end() && M->BeginSpelled < SpelledEndIndex; ++M)
+ Expansions.push_back(makeExpansion(File, *M));
+ return Expansions;
}
+
llvm::ArrayRef<syntax::Token>
syntax::spelledTokensTouching(SourceLocation Loc,
llvm::ArrayRef<syntax::Token> Tokens) {
@@ -554,11 +575,11 @@ public:
// A's startpoint.
if (!Range.getBegin().isFileID()) {
Range.setBegin(SM.getExpansionLoc(Range.getBegin()));
- assert(Collector->Expansions.count(Range.getBegin().getRawEncoding()) &&
+ assert(Collector->Expansions.count(Range.getBegin()) &&
"Overlapping macros should have same expansion location");
}
- Collector->Expansions[Range.getBegin().getRawEncoding()] = Range.getEnd();
+ Collector->Expansions[Range.getBegin()] = Range.getEnd();
LastExpansionEnd = Range.getEnd();
}
// FIXME: handle directives like #pragma, #include, etc.
@@ -690,8 +711,8 @@ private:
// If we know mapping bounds at [NextSpelled, KnownEnd] (macro expansion)
// then we want to partition our (empty) mapping.
// [Start, NextSpelled) [NextSpelled, KnownEnd] (KnownEnd, Target)
- SourceLocation KnownEnd = CollectedExpansions.lookup(
- SpelledTokens[NextSpelled].location().getRawEncoding());
+ SourceLocation KnownEnd =
+ CollectedExpansions.lookup(SpelledTokens[NextSpelled].location());
if (KnownEnd.isValid()) {
FlushMapping(); // Emits [Start, NextSpelled)
while (NextSpelled < SpelledTokens.size() &&
@@ -728,7 +749,7 @@ private:
// We need no mapping for file tokens copied to the expanded stream.
} else {
// We found a new macro expansion. We should have its spelling bounds.
- auto End = CollectedExpansions.lookup(Expansion.getRawEncoding());
+ auto End = CollectedExpansions.lookup(Expansion);
assert(End.isValid() && "Macro expansion wasn't captured?");
// Mapping starts here...
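// Note (illustrative sketch, not part of the patch): the refactoring above
// funnels the debug checks into `fileForSpelled` and adds
// `expansionsOverlapping`, which reports every macro mapping intersecting a
// spelled-token range. Assuming `Buffer` was filled by a TokenCollector:
#include "clang/Tooling/Syntax/Tokens.h"
#include "llvm/Support/raw_ostream.h"
void countExpansions(const clang::syntax::TokenBuffer &Buffer,
                     llvm::ArrayRef<clang::syntax::Token> Spelled) {
  // All tokens in `Spelled` must belong to one file tracked by `Buffer`.
  for (const auto &E : Buffer.expansionsOverlapping(Spelled))
    llvm::errs() << E.Spelled.size() << " spelled -> " << E.Expanded.size()
                 << " expanded tokens\n";
}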
diff --git a/clang/lib/Tooling/Syntax/Tree.cpp b/clang/lib/Tooling/Syntax/Tree.cpp
index 37579e6145b6..07ee13e313f5 100644
--- a/clang/lib/Tooling/Syntax/Tree.cpp
+++ b/clang/lib/Tooling/Syntax/Tree.cpp
@@ -19,8 +19,8 @@ namespace {
static void traverse(const syntax::Node *N,
llvm::function_ref<void(const syntax::Node *)> Visit) {
if (auto *T = dyn_cast<syntax::Tree>(N)) {
- for (auto *C = T->firstChild(); C; C = C->nextSibling())
- traverse(C, Visit);
+ for (const syntax::Node &C : T->getChildren())
+ traverse(&C, Visit);
}
Visit(N);
}
@@ -33,14 +33,14 @@ static void traverse(syntax::Node *N,
} // namespace
syntax::Arena::Arena(SourceManager &SourceMgr, const LangOptions &LangOpts,
- TokenBuffer Tokens)
- : SourceMgr(SourceMgr), LangOpts(LangOpts), Tokens(std::move(Tokens)) {}
+ const TokenBuffer &Tokens)
+ : SourceMgr(SourceMgr), LangOpts(LangOpts), Tokens(Tokens) {}
-const clang::syntax::TokenBuffer &syntax::Arena::tokenBuffer() const {
+const syntax::TokenBuffer &syntax::Arena::getTokenBuffer() const {
return Tokens;
}
-std::pair<FileID, llvm::ArrayRef<syntax::Token>>
+std::pair<FileID, ArrayRef<syntax::Token>>
syntax::Arena::lexBuffer(std::unique_ptr<llvm::MemoryBuffer> Input) {
auto FID = SourceMgr.createFileID(std::move(Input));
auto It = ExtraTokens.try_emplace(FID, tokenize(FID, SourceMgr, LangOpts));
@@ -52,26 +52,47 @@ syntax::Leaf::Leaf(const syntax::Token *Tok) : Node(NodeKind::Leaf), Tok(Tok) {
assert(Tok != nullptr);
}
-bool syntax::Leaf::classof(const Node *N) {
- return N->kind() == NodeKind::Leaf;
-}
-
syntax::Node::Node(NodeKind Kind)
- : Parent(nullptr), NextSibling(nullptr), Kind(static_cast<unsigned>(Kind)),
- Role(0), Original(false), CanModify(false) {
+ : Parent(nullptr), NextSibling(nullptr), PreviousSibling(nullptr),
+ Kind(static_cast<unsigned>(Kind)), Role(0), Original(false),
+ CanModify(false) {
this->setRole(NodeRole::Detached);
}
-bool syntax::Node::isDetached() const { return role() == NodeRole::Detached; }
+bool syntax::Node::isDetached() const {
+ return getRole() == NodeRole::Detached;
+}
void syntax::Node::setRole(NodeRole NR) {
this->Role = static_cast<unsigned>(NR);
}
-bool syntax::Tree::classof(const Node *N) { return N->kind() > NodeKind::Leaf; }
+void syntax::Tree::appendChildLowLevel(Node *Child, NodeRole Role) {
+ assert(Child->getRole() == NodeRole::Detached);
+ assert(Role != NodeRole::Detached);
+
+ Child->setRole(Role);
+ appendChildLowLevel(Child);
+}
+
+void syntax::Tree::appendChildLowLevel(Node *Child) {
+ assert(Child->Parent == nullptr);
+ assert(Child->NextSibling == nullptr);
+ assert(Child->PreviousSibling == nullptr);
+ assert(Child->getRole() != NodeRole::Detached);
+
+ Child->Parent = this;
+ if (this->LastChild) {
+ Child->PreviousSibling = this->LastChild;
+ this->LastChild->NextSibling = Child;
+ } else
+ this->FirstChild = Child;
+
+ this->LastChild = Child;
+}
void syntax::Tree::prependChildLowLevel(Node *Child, NodeRole Role) {
- assert(Child->role() == NodeRole::Detached);
+ assert(Child->getRole() == NodeRole::Detached);
assert(Role != NodeRole::Detached);
Child->setRole(Role);
@@ -81,135 +102,166 @@ void syntax::Tree::prependChildLowLevel(Node *Child, NodeRole Role) {
void syntax::Tree::prependChildLowLevel(Node *Child) {
assert(Child->Parent == nullptr);
assert(Child->NextSibling == nullptr);
- assert(Child->role() != NodeRole::Detached);
+ assert(Child->PreviousSibling == nullptr);
+ assert(Child->getRole() != NodeRole::Detached);
Child->Parent = this;
- Child->NextSibling = this->FirstChild;
+ if (this->FirstChild) {
+ Child->NextSibling = this->FirstChild;
+ this->FirstChild->PreviousSibling = Child;
+ } else
+ this->LastChild = Child;
+
this->FirstChild = Child;
}
-void syntax::Tree::replaceChildRangeLowLevel(Node *BeforeBegin, Node *End,
+void syntax::Tree::replaceChildRangeLowLevel(Node *Begin, Node *End,
Node *New) {
- assert(!BeforeBegin || BeforeBegin->Parent == this);
+ assert((!Begin || Begin->Parent == this) &&
+ "`Begin` is not a child of `this`.");
+ assert((!End || End->Parent == this) && "`End` is not a child of `this`.");
+ assert(canModify() && "Cannot modify `this`.");
#ifndef NDEBUG
- for (auto *N = New; N; N = N->nextSibling()) {
+ for (auto *N = New; N; N = N->NextSibling) {
assert(N->Parent == nullptr);
- assert(N->role() != NodeRole::Detached && "Roles must be set");
+ assert(N->getRole() != NodeRole::Detached && "Roles must be set");
// FIXME: sanity-check the role.
}
+
+ auto Reachable = [](Node *From, Node *N) {
+ if (!N)
+ return true;
+ for (auto *It = From; It; It = It->NextSibling)
+ if (It == N)
+ return true;
+ return false;
+ };
+ assert(Reachable(FirstChild, Begin) && "`Begin` is not reachable.");
+ assert(Reachable(Begin, End) && "`End` is not after `Begin`.");
#endif
+ if (!New && Begin == End)
+ return;
+
+ // Mark modification.
+ for (auto *T = this; T && T->Original; T = T->Parent)
+ T->Original = false;
+
+ // Save the node before the range to be removed. Later we insert the `New`
+ // range after this node.
+ auto *BeforeBegin = Begin ? Begin->PreviousSibling : LastChild;
+
// Detach old nodes.
- for (auto *N = !BeforeBegin ? FirstChild : BeforeBegin->nextSibling();
- N != End;) {
+ for (auto *N = Begin; N != End;) {
auto *Next = N->NextSibling;
N->setRole(NodeRole::Detached);
N->Parent = nullptr;
N->NextSibling = nullptr;
+ N->PreviousSibling = nullptr;
if (N->Original)
- traverse(N, [&](Node *C) { C->Original = false; });
+ traverse(N, [](Node *C) { C->Original = false; });
N = Next;
}
- // Attach new nodes.
- if (BeforeBegin)
- BeforeBegin->NextSibling = New ? New : End;
- else
- FirstChild = New ? New : End;
+ // Attach new range.
+ auto *&NewFirst = BeforeBegin ? BeforeBegin->NextSibling : FirstChild;
+ auto *&NewLast = End ? End->PreviousSibling : LastChild;
- if (New) {
- auto *Last = New;
- for (auto *N = New; N != nullptr; N = N->nextSibling()) {
- Last = N;
- N->Parent = this;
- }
- Last->NextSibling = End;
+ if (!New) {
+ NewFirst = End;
+ NewLast = BeforeBegin;
+ return;
}
- // Mark the node as modified.
- for (auto *T = this; T && T->Original; T = T->Parent)
- T->Original = false;
+ New->PreviousSibling = BeforeBegin;
+ NewFirst = New;
+
+ Node *LastInNew;
+ for (auto *N = New; N != nullptr; N = N->NextSibling) {
+ LastInNew = N;
+ N->Parent = this;
+ }
+ LastInNew->NextSibling = End;
+ NewLast = LastInNew;
}
namespace {
-static void dumpTokens(llvm::raw_ostream &OS, ArrayRef<syntax::Token> Tokens,
- const SourceManager &SM) {
- assert(!Tokens.empty());
- bool First = true;
- for (const auto &T : Tokens) {
- if (!First)
- OS << " ";
- else
- First = false;
- // Handle 'eof' separately, calling text() on it produces an empty string.
- if (T.kind() == tok::eof) {
- OS << "<eof>";
- continue;
- }
- OS << T.text(SM);
- }
+static void dumpLeaf(raw_ostream &OS, const syntax::Leaf *L,
+ const SourceManager &SM) {
+ assert(L);
+ const auto *Token = L->getToken();
+ assert(Token);
+ // Handle 'eof' separately; calling text() on it produces an empty string.
+ if (Token->kind() == tok::eof)
+ OS << "<eof>";
+ else
+ OS << Token->text(SM);
}
-static void dumpTree(llvm::raw_ostream &OS, const syntax::Node *N,
- const syntax::Arena &A, std::vector<bool> IndentMask) {
- std::string Marks;
- if (!N->isOriginal())
- Marks += "M";
- if (N->role() == syntax::NodeRole::Detached)
- Marks += "*"; // FIXME: find a nice way to print other roles.
- if (!N->canModify())
- Marks += "I";
- if (!Marks.empty())
- OS << Marks << ": ";
-
- if (auto *L = llvm::dyn_cast<syntax::Leaf>(N)) {
- dumpTokens(OS, *L->token(), A.sourceManager());
+static void dumpNode(raw_ostream &OS, const syntax::Node *N,
+ const SourceManager &SM, std::vector<bool> IndentMask) {
+ auto DumpExtraInfo = [&OS](const syntax::Node *N) {
+ if (N->getRole() != syntax::NodeRole::Unknown)
+ OS << " " << N->getRole();
+ if (!N->isOriginal())
+ OS << " synthesized";
+ if (!N->canModify())
+ OS << " unmodifiable";
+ };
+
+ assert(N);
+ if (const auto *L = dyn_cast<syntax::Leaf>(N)) {
+ OS << "'";
+ dumpLeaf(OS, L, SM);
+ OS << "'";
+ DumpExtraInfo(N);
OS << "\n";
return;
}
- auto *T = llvm::cast<syntax::Tree>(N);
- OS << T->kind() << "\n";
+ const auto *T = cast<syntax::Tree>(N);
+ OS << T->getKind();
+ DumpExtraInfo(N);
+ OS << "\n";
- for (auto It = T->firstChild(); It != nullptr; It = It->nextSibling()) {
+ for (const syntax::Node &It : T->getChildren()) {
for (bool Filled : IndentMask) {
if (Filled)
OS << "| ";
else
OS << " ";
}
- if (!It->nextSibling()) {
+ if (!It.getNextSibling()) {
OS << "`-";
IndentMask.push_back(false);
} else {
OS << "|-";
IndentMask.push_back(true);
}
- dumpTree(OS, It, A, IndentMask);
+ dumpNode(OS, &It, SM, IndentMask);
IndentMask.pop_back();
}
}
} // namespace
-std::string syntax::Node::dump(const Arena &A) const {
+std::string syntax::Node::dump(const SourceManager &SM) const {
std::string Str;
llvm::raw_string_ostream OS(Str);
- dumpTree(OS, this, A, /*IndentMask=*/{});
+ dumpNode(OS, this, SM, /*IndentMask=*/{});
return std::move(OS.str());
}
-std::string syntax::Node::dumpTokens(const Arena &A) const {
+std::string syntax::Node::dumpTokens(const SourceManager &SM) const {
std::string Storage;
llvm::raw_string_ostream OS(Storage);
traverse(this, [&](const syntax::Node *N) {
- auto *L = llvm::dyn_cast<syntax::Leaf>(N);
- if (!L)
- return;
- ::dumpTokens(OS, *L->token(), A.sourceManager());
- OS << " ";
+ if (const auto *L = dyn_cast<syntax::Leaf>(N)) {
+ dumpLeaf(OS, L, SM);
+ OS << " ";
+ }
});
return OS.str();
}
@@ -217,19 +269,37 @@ std::string syntax::Node::dumpTokens(const Arena &A) const {
void syntax::Node::assertInvariants() const {
#ifndef NDEBUG
if (isDetached())
- assert(parent() == nullptr);
+ assert(getParent() == nullptr);
else
- assert(parent() != nullptr);
+ assert(getParent() != nullptr);
- auto *T = dyn_cast<Tree>(this);
+ const auto *T = dyn_cast<Tree>(this);
if (!T)
return;
- for (auto *C = T->firstChild(); C; C = C->nextSibling()) {
+ for (const Node &C : T->getChildren()) {
if (T->isOriginal())
- assert(C->isOriginal());
- assert(!C->isDetached());
- assert(C->parent() == T);
+ assert(C.isOriginal());
+ assert(!C.isDetached());
+ assert(C.getParent() == T);
+ const auto *Next = C.getNextSibling();
+ assert(!Next || &C == Next->getPreviousSibling());
+ if (!C.getNextSibling())
+ assert(&C == T->getLastChild() &&
+ "Last child is reachable by advancing from the first child.");
+ }
+
+ const auto *L = dyn_cast<List>(T);
+ if (!L)
+ return;
+ for (const Node &C : T->getChildren()) {
+ assert(C.getRole() == NodeRole::ListElement ||
+ C.getRole() == NodeRole::ListDelimiter);
+ if (C.getRole() == NodeRole::ListDelimiter) {
+ assert(isa<Leaf>(C));
+ assert(cast<Leaf>(C).getToken()->kind() == L->getDelimiterTokenKind());
+ }
}
+
#endif
}
@@ -239,34 +309,162 @@ void syntax::Node::assertInvariantsRecursive() const {
#endif
}
-syntax::Leaf *syntax::Tree::firstLeaf() {
- auto *T = this;
- while (auto *C = T->firstChild()) {
- if (auto *L = dyn_cast<syntax::Leaf>(C))
+const syntax::Leaf *syntax::Tree::findFirstLeaf() const {
+ for (const Node &C : getChildren()) {
+ if (const auto *L = dyn_cast<syntax::Leaf>(&C))
+ return L;
+ if (const auto *L = cast<syntax::Tree>(C).findFirstLeaf())
return L;
- T = cast<syntax::Tree>(C);
}
return nullptr;
}
-syntax::Leaf *syntax::Tree::lastLeaf() {
- auto *T = this;
- while (auto *C = T->firstChild()) {
- // Find the last child.
- while (auto *Next = C->nextSibling())
- C = Next;
-
- if (auto *L = dyn_cast<syntax::Leaf>(C))
+const syntax::Leaf *syntax::Tree::findLastLeaf() const {
+ for (const auto *C = getLastChild(); C; C = C->getPreviousSibling()) {
+ if (const auto *L = dyn_cast<syntax::Leaf>(C))
+ return L;
+ if (const auto *L = cast<syntax::Tree>(C)->findLastLeaf())
return L;
- T = cast<syntax::Tree>(C);
}
return nullptr;
}
-syntax::Node *syntax::Tree::findChild(NodeRole R) {
- for (auto *C = FirstChild; C; C = C->nextSibling()) {
- if (C->role() == R)
- return C;
+const syntax::Node *syntax::Tree::findChild(NodeRole R) const {
+ for (const Node &C : getChildren()) {
+ if (C.getRole() == R)
+ return &C;
}
return nullptr;
}
+
+std::vector<syntax::List::ElementAndDelimiter<syntax::Node>>
+syntax::List::getElementsAsNodesAndDelimiters() {
+ if (!getFirstChild())
+ return {};
+
+ std::vector<syntax::List::ElementAndDelimiter<Node>> Children;
+ syntax::Node *ElementWithoutDelimiter = nullptr;
+ for (Node &C : getChildren()) {
+ switch (C.getRole()) {
+ case syntax::NodeRole::ListElement: {
+ if (ElementWithoutDelimiter) {
+ Children.push_back({ElementWithoutDelimiter, nullptr});
+ }
+ ElementWithoutDelimiter = &C;
+ break;
+ }
+ case syntax::NodeRole::ListDelimiter: {
+ Children.push_back({ElementWithoutDelimiter, cast<syntax::Leaf>(&C)});
+ ElementWithoutDelimiter = nullptr;
+ break;
+ }
+ default:
+ llvm_unreachable(
+ "A list can have only elements and delimiters as children.");
+ }
+ }
+
+ switch (getTerminationKind()) {
+ case syntax::List::TerminationKind::Separated: {
+ Children.push_back({ElementWithoutDelimiter, nullptr});
+ break;
+ }
+ case syntax::List::TerminationKind::Terminated:
+ case syntax::List::TerminationKind::MaybeTerminated: {
+ if (ElementWithoutDelimiter) {
+ Children.push_back({ElementWithoutDelimiter, nullptr});
+ }
+ break;
+ }
+ }
+
+ return Children;
+}
+
+// Almost the same implementation as `getElementsAsNodesAndDelimiters`, but
+// ignoring delimiters.
+std::vector<syntax::Node *> syntax::List::getElementsAsNodes() {
+ if (!getFirstChild())
+ return {};
+
+ std::vector<syntax::Node *> Children;
+ syntax::Node *ElementWithoutDelimiter = nullptr;
+ for (Node &C : getChildren()) {
+ switch (C.getRole()) {
+ case syntax::NodeRole::ListElement: {
+ if (ElementWithoutDelimiter) {
+ Children.push_back(ElementWithoutDelimiter);
+ }
+ ElementWithoutDelimiter = &C;
+ break;
+ }
+ case syntax::NodeRole::ListDelimiter: {
+ Children.push_back(ElementWithoutDelimiter);
+ ElementWithoutDelimiter = nullptr;
+ break;
+ }
+ default:
+ llvm_unreachable("A list has only elements or delimiters.");
+ }
+ }
+
+ switch (getTerminationKind()) {
+ case syntax::List::TerminationKind::Separated: {
+ Children.push_back(ElementWithoutDelimiter);
+ break;
+ }
+ case syntax::List::TerminationKind::Terminated:
+ case syntax::List::TerminationKind::MaybeTerminated: {
+ if (ElementWithoutDelimiter) {
+ Children.push_back(ElementWithoutDelimiter);
+ }
+ break;
+ }
+ }
+
+ return Children;
+}
+
+clang::tok::TokenKind syntax::List::getDelimiterTokenKind() const {
+ switch (this->getKind()) {
+ case NodeKind::NestedNameSpecifier:
+ return clang::tok::coloncolon;
+ case NodeKind::CallArguments:
+ case NodeKind::ParameterDeclarationList:
+ case NodeKind::DeclaratorList:
+ return clang::tok::comma;
+ default:
+ llvm_unreachable("This is not a subclass of List, thus "
+ "getDelimiterTokenKind() cannot be called");
+ }
+}
+
+syntax::List::TerminationKind syntax::List::getTerminationKind() const {
+ switch (this->getKind()) {
+ case NodeKind::NestedNameSpecifier:
+ return TerminationKind::Terminated;
+ case NodeKind::CallArguments:
+ case NodeKind::ParameterDeclarationList:
+ case NodeKind::DeclaratorList:
+ return TerminationKind::Separated;
+ default:
+ llvm_unreachable("This is not a subclass of List, thus "
+ "getTerminationKind() cannot be called");
+ }
+}
+
+bool syntax::List::canBeEmpty() const {
+ switch (this->getKind()) {
+ case NodeKind::NestedNameSpecifier:
+ return false;
+ case NodeKind::CallArguments:
+ return true;
+ case NodeKind::ParameterDeclarationList:
+ return true;
+ case NodeKind::DeclaratorList:
+ return true;
+ default:
+ llvm_unreachable("This is not a subclass of List, thus canBeEmpty() "
+ "cannot be called");
+ }
+}
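// Note (illustrative sketch, not part of the patch): how the new List API
// pairs elements with delimiters. For a Separated list such as the
// parameters in `(int a, int b)` the result is {a, ','} then {b, nullptr};
// for a Terminated list such as `::a::` each element carries its '::'.
#include "clang/Tooling/Syntax/Tree.h"
void visitList(clang::syntax::List *L) {
  for (auto &ED : L->getElementsAsNodesAndDelimiters()) {
    // `element` is null for a delimiter with no preceding element;
    // `delimiter` is null for a trailing element of a Separated list.
    (void)ED.element;
    (void)ED.delimiter;
  }
}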
diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp
index 40b6cff0d627..79851ac723da 100644
--- a/clang/lib/Tooling/Tooling.cpp
+++ b/clang/lib/Tooling/Tooling.cpp
@@ -78,7 +78,7 @@ newDriver(DiagnosticsEngine *Diagnostics, const char *BinaryName,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
driver::Driver *CompilerDriver =
new driver::Driver(BinaryName, llvm::sys::getDefaultTargetTriple(),
- *Diagnostics, std::move(VFS));
+ *Diagnostics, "clang LLVM compiler", std::move(VFS));
CompilerDriver->setTitle("clang_based_tool");
return CompilerDriver;
}
@@ -245,27 +245,38 @@ std::string getAbsolutePath(StringRef File) {
void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
StringRef InvokedAs) {
- if (!CommandLine.empty() && !InvokedAs.empty()) {
- bool AlreadyHasTarget = false;
- bool AlreadyHasMode = false;
- // Skip CommandLine[0].
- for (auto Token = ++CommandLine.begin(); Token != CommandLine.end();
- ++Token) {
- StringRef TokenRef(*Token);
- AlreadyHasTarget |=
- (TokenRef == "-target" || TokenRef.startswith("-target="));
- AlreadyHasMode |= (TokenRef == "--driver-mode" ||
- TokenRef.startswith("--driver-mode="));
- }
- auto TargetMode =
- driver::ToolChain::getTargetAndModeFromProgramName(InvokedAs);
- if (!AlreadyHasMode && TargetMode.DriverMode) {
- CommandLine.insert(++CommandLine.begin(), TargetMode.DriverMode);
- }
- if (!AlreadyHasTarget && TargetMode.TargetIsValid) {
- CommandLine.insert(++CommandLine.begin(), {"-target",
- TargetMode.TargetPrefix});
- }
+ if (CommandLine.empty() || InvokedAs.empty())
+ return;
+ const auto &Table = driver::getDriverOptTable();
+ // --target=X
+ const std::string TargetOPT =
+ Table.getOption(driver::options::OPT_target).getPrefixedName();
+ // -target X
+ const std::string TargetOPTLegacy =
+ Table.getOption(driver::options::OPT_target_legacy_spelling)
+ .getPrefixedName();
+ // --driver-mode=X
+ const std::string DriverModeOPT =
+ Table.getOption(driver::options::OPT_driver_mode).getPrefixedName();
+ auto TargetMode =
+ driver::ToolChain::getTargetAndModeFromProgramName(InvokedAs);
+ // No need to search for target args if we don't have a target/mode to insert.
+ bool ShouldAddTarget = TargetMode.TargetIsValid;
+ bool ShouldAddMode = TargetMode.DriverMode != nullptr;
+ // Skip CommandLine[0].
+ for (auto Token = ++CommandLine.begin(); Token != CommandLine.end();
+ ++Token) {
+ StringRef TokenRef(*Token);
+ ShouldAddTarget = ShouldAddTarget && !TokenRef.startswith(TargetOPT) &&
+ !TokenRef.equals(TargetOPTLegacy);
+ ShouldAddMode = ShouldAddMode && !TokenRef.startswith(DriverModeOPT);
+ }
+ if (ShouldAddMode) {
+ CommandLine.insert(++CommandLine.begin(), TargetMode.DriverMode);
+ }
+ if (ShouldAddTarget) {
+ CommandLine.insert(++CommandLine.begin(),
+ TargetOPT + TargetMode.TargetPrefix);
}
}
@@ -308,12 +319,6 @@ ToolInvocation::~ToolInvocation() {
delete Action;
}
-void ToolInvocation::mapVirtualFile(StringRef FilePath, StringRef Content) {
- SmallString<1024> PathStorage;
- llvm::sys::path::native(FilePath, PathStorage);
- MappedFileContents[PathStorage] = Content;
-}
-
bool ToolInvocation::run() {
std::vector<const char*> Argv;
for (const std::string &Str : CommandLine)
@@ -348,14 +353,6 @@ bool ToolInvocation::run() {
return false;
std::unique_ptr<CompilerInvocation> Invocation(
newInvocation(&Diagnostics, *CC1Args, BinaryName));
- // FIXME: remove this when all users have migrated!
- for (const auto &It : MappedFileContents) {
- // Inject the code as the given file name into the preprocessor options.
- std::unique_ptr<llvm::MemoryBuffer> Input =
- llvm::MemoryBuffer::getMemBuffer(It.getValue());
- Invocation->getPreprocessorOpts().addRemappedFile(It.getKey(),
- Input.release());
- }
return runInvocation(BinaryName, Compilation.get(), std::move(Invocation),
std::move(PCHContainerOps));
}
@@ -648,7 +645,7 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
if (!Invocation.run())
return nullptr;
-
+
assert(ASTs.size() == 1);
return std::move(ASTs[0]);
}
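// Note (illustrative call, not part of the patch): after the rewrite above,
// the target is inserted in its `--target=<triple>` spelling and existing
// `-target`/`--driver-mode` flags are recognized via the driver option
// table. Assuming `Args[0]` holds the tool name:
#include "clang/Tooling/Tooling.h"
#include <string>
#include <vector>
void normalizeArgs(std::vector<std::string> &Args) {
  // May insert, after Args[0]: --driver-mode=g++ --target=aarch64-linux-gnu
  clang::tooling::addTargetAndModeForProgramName(Args,
                                                 "aarch64-linux-gnu-g++");
}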
diff --git a/clang/lib/Tooling/Transformer/Parsing.cpp b/clang/lib/Tooling/Transformer/Parsing.cpp
index 1579115b9313..66fa04a15594 100644
--- a/clang/lib/Tooling/Transformer/Parsing.cpp
+++ b/clang/lib/Tooling/Transformer/Parsing.cpp
@@ -109,14 +109,14 @@ getUnaryRangeSelectors() {
static const llvm::StringMap<RangeSelectorOp<std::string, std::string>> &
getBinaryStringSelectors() {
static const llvm::StringMap<RangeSelectorOp<std::string, std::string>> M = {
- {"encloseNodes", range}};
+ {"encloseNodes", encloseNodes}};
return M;
}
static const llvm::StringMap<RangeSelectorOp<RangeSelector, RangeSelector>> &
getBinaryRangeSelectors() {
static const llvm::StringMap<RangeSelectorOp<RangeSelector, RangeSelector>>
- M = {{"enclose", range}};
+ M = {{"enclose", enclose}, {"between", between}};
return M;
}
@@ -148,7 +148,7 @@ static ParseState advance(ParseState S, size_t N) {
}
static StringRef consumeWhitespace(StringRef S) {
- return S.drop_while([](char c) { return c >= 0 && isWhitespace(c); });
+ return S.drop_while([](char c) { return isASCII(c) && isWhitespace(c); });
}
// Parses a single expected character \c c from \c State, skipping preceding
@@ -165,7 +165,7 @@ static ExpectedProgress<llvm::NoneType> parseChar(char c, ParseState State) {
static ExpectedProgress<std::string> parseId(ParseState State) {
State.Input = consumeWhitespace(State.Input);
auto Id = State.Input.take_while(
- [](char c) { return c >= 0 && isIdentifierBody(c); });
+ [](char c) { return isASCII(c) && isIdentifierBody(c); });
if (Id.empty())
return makeParseError(State, "failed to parse name");
return makeParseProgress(advance(State, Id.size()), Id.str());
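// Note (illustrative sketch, not part of the patch): the two hunks above
// replace the sign-dependent `c >= 0` guard with an explicit `isASCII(c)`.
// Where plain `char` is unsigned, `c >= 0` was always true and filtered
// nothing; `isASCII` states the intended check portably:
#include "clang/Basic/CharInfo.h"
static bool isParserIdByte(char C) {
  // Reject bytes of multi-byte UTF-8 sequences (e.g. 0xC3) on both
  // signed-char and unsigned-char hosts before the ASCII class lookup.
  return clang::isASCII(C) && clang::isIdentifierBody(C);
}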
diff --git a/clang/lib/Tooling/Transformer/RangeSelector.cpp b/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 29b1a5b0372e..0f3138db218a 100644
--- a/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -116,11 +116,24 @@ RangeSelector transformer::after(RangeSelector Selector) {
Expected<CharSourceRange> SelectedRange = Selector(Result);
if (!SelectedRange)
return SelectedRange.takeError();
- if (SelectedRange->isCharRange())
- return CharSourceRange::getCharRange(SelectedRange->getEnd());
- return CharSourceRange::getCharRange(Lexer::getLocForEndOfToken(
- SelectedRange->getEnd(), 0, Result.Context->getSourceManager(),
- Result.Context->getLangOpts()));
+ SourceLocation End = SelectedRange->getEnd();
+ if (SelectedRange->isTokenRange()) {
+ // We need to find the actual (exclusive) end location from which to
+ // create a new source range. However, that's not guaranteed to be valid,
+ // even if the token location itself is valid. So, we create a token range
+ // consisting only of the last token, then map that range back to the
+ // source file. If that succeeds, we have a valid location for the end of
+ // the generated range.
+ CharSourceRange Range = Lexer::makeFileCharRange(
+ CharSourceRange::getTokenRange(SelectedRange->getEnd()),
+ *Result.SourceManager, Result.Context->getLangOpts());
+ if (Range.isInvalid())
+ return invalidArgumentError(
+ "after: can't resolve sub-range to valid source range");
+ End = Range.getEnd();
+ }
+
+ return CharSourceRange::getCharRange(End);
};
}
@@ -129,7 +142,8 @@ RangeSelector transformer::node(std::string ID) {
Expected<DynTypedNode> Node = getNode(Result.Nodes, ID);
if (!Node)
return Node.takeError();
- return Node->get<Stmt>() != nullptr && Node->get<Expr>() == nullptr
+ return (Node->get<Decl>() != nullptr ||
+ (Node->get<Stmt>() != nullptr && Node->get<Expr>() == nullptr))
? tooling::getExtendedRange(*Node, tok::TokenKind::semi,
*Result.Context)
: CharSourceRange::getTokenRange(Node->getSourceRange());
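// Note (illustrative sketch, not part of the patch): `after` now resolves
// its exclusive end through Lexer::makeFileCharRange, mapping ends that fall
// inside macro expansions back to a file location where possible and
// reporting an error otherwise. Assuming "fun" is bound by the rule's
// matcher:
#include "clang/Tooling/Transformer/RangeSelector.h"
// Selects the zero-width range just past the last token of the bound node,
// e.g. as an insertion point for generated code.
auto AfterFun = clang::transformer::after(clang::transformer::node("fun"));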
diff --git a/clang/lib/Tooling/Transformer/RewriteRule.cpp b/clang/lib/Tooling/Transformer/RewriteRule.cpp
index 995bec03cd66..93bd7e91dba7 100644
--- a/clang/lib/Tooling/Transformer/RewriteRule.cpp
+++ b/clang/lib/Tooling/Transformer/RewriteRule.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Transformer/RewriteRule.h"
+#include "clang/AST/ASTTypeTraits.h"
+#include "clang/AST/Stmt.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Basic/SourceLocation.h"
@@ -28,6 +30,8 @@ using ast_matchers::internal::DynTypedMatcher;
using MatchResult = MatchFinder::MatchResult;
+const char transformer::RootID[] = "___root___";
+
static Expected<SmallVector<transformer::Edit, 1>>
translateEdits(const MatchResult &Result, ArrayRef<ASTEdit> ASTEdits) {
SmallVector<transformer::Edit, 1> Edits;
@@ -38,16 +42,25 @@ translateEdits(const MatchResult &Result, ArrayRef<ASTEdit> ASTEdits) {
llvm::Optional<CharSourceRange> EditRange =
tooling::getRangeForEdit(*Range, *Result.Context);
// FIXME: let user specify whether to treat this case as an error or ignore
- // it as is currently done.
+ // it as is currently done. This behavior is problematic in that it hides
+ // failures from bad ranges. Also, the behavior here differs from
+ // `flatten`. Here, we abort (without error), whereas flatten, if it hits an
+ // empty list, does not abort. As a result, `editList({A,B})` is not
+ // equivalent to `flatten(edit(A), edit(B))`. The former will abort if `A`
+ // produces a bad range, whereas the latter will simply ignore A.
if (!EditRange)
return SmallVector<Edit, 0>();
auto Replacement = E.Replacement->eval(Result);
if (!Replacement)
return Replacement.takeError();
+ auto Metadata = E.Metadata(Result);
+ if (!Metadata)
+ return Metadata.takeError();
transformer::Edit T;
+ T.Kind = E.Kind;
T.Range = *EditRange;
T.Replacement = std::move(*Replacement);
- T.Metadata = E.Metadata;
+ T.Metadata = std::move(*Metadata);
Edits.push_back(std::move(T));
}
return Edits;
@@ -65,6 +78,42 @@ EditGenerator transformer::edit(ASTEdit Edit) {
};
}
+EditGenerator transformer::noopEdit(RangeSelector Anchor) {
+ return [Anchor = std::move(Anchor)](const MatchResult &Result)
+ -> Expected<SmallVector<transformer::Edit, 1>> {
+ Expected<CharSourceRange> Range = Anchor(Result);
+ if (!Range)
+ return Range.takeError();
+ // In case the range is inside a macro expansion, map the location back to a
+ // "real" source location.
+ SourceLocation Begin =
+ Result.SourceManager->getSpellingLoc(Range->getBegin());
+ Edit E;
+ // Implicitly, leave `E.Replacement` as the empty string.
+ E.Kind = EditKind::Range;
+ E.Range = CharSourceRange::getCharRange(Begin, Begin);
+ return SmallVector<Edit, 1>{E};
+ };
+}
+
+EditGenerator
+transformer::flattenVector(SmallVector<EditGenerator, 2> Generators) {
+ if (Generators.size() == 1)
+ return std::move(Generators[0]);
+ return
+ [Gs = std::move(Generators)](
+ const MatchResult &Result) -> llvm::Expected<SmallVector<Edit, 1>> {
+ SmallVector<Edit, 1> AllEdits;
+ for (const auto &G : Gs) {
+ llvm::Expected<SmallVector<Edit, 1>> Edits = G(Result);
+ if (!Edits)
+ return Edits.takeError();
+ AllEdits.append(Edits->begin(), Edits->end());
+ }
+ return AllEdits;
+ };
+}
+
ASTEdit transformer::changeTo(RangeSelector Target, TextGenerator Replacement) {
ASTEdit E;
E.TargetRange = std::move(Target);
@@ -90,21 +139,196 @@ public:
};
} // namespace
+static TextGenerator makeText(std::string S) {
+ return std::make_shared<SimpleTextGenerator>(std::move(S));
+}
+
ASTEdit transformer::remove(RangeSelector S) {
- return change(std::move(S), std::make_shared<SimpleTextGenerator>(""));
+ return change(std::move(S), makeText(""));
+}
+
+static std::string formatHeaderPath(StringRef Header, IncludeFormat Format) {
+ switch (Format) {
+ case transformer::IncludeFormat::Quoted:
+ return Header.str();
+ case transformer::IncludeFormat::Angled:
+ return ("<" + Header + ">").str();
+ }
+ llvm_unreachable("Unknown transformer::IncludeFormat enum");
+}
+
+ASTEdit transformer::addInclude(RangeSelector Target, StringRef Header,
+ IncludeFormat Format) {
+ ASTEdit E;
+ E.Kind = EditKind::AddInclude;
+ E.TargetRange = Target;
+ E.Replacement = makeText(formatHeaderPath(Header, Format));
+ return E;
}
-RewriteRule transformer::makeRule(ast_matchers::internal::DynTypedMatcher M,
- EditGenerator Edits,
+RewriteRule transformer::makeRule(DynTypedMatcher M, EditGenerator Edits,
TextGenerator Explanation) {
- return RewriteRule{{RewriteRule::Case{
- std::move(M), std::move(Edits), std::move(Explanation), {}}}};
+ return RewriteRule{{RewriteRule::Case{std::move(M), std::move(Edits),
+ std::move(Explanation)}}};
+}
+
+namespace {
+
+/// Unconditionally binds the given node set before trying `InnerMatcher` and
+/// keeps the bound nodes on a successful match.
+template <typename T>
+class BindingsMatcher : public ast_matchers::internal::MatcherInterface<T> {
+ ast_matchers::BoundNodes Nodes;
+ const ast_matchers::internal::Matcher<T> InnerMatcher;
+
+public:
+ explicit BindingsMatcher(ast_matchers::BoundNodes Nodes,
+ ast_matchers::internal::Matcher<T> InnerMatcher)
+ : Nodes(std::move(Nodes)), InnerMatcher(std::move(InnerMatcher)) {}
+
+ bool matches(
+ const T &Node, ast_matchers::internal::ASTMatchFinder *Finder,
+ ast_matchers::internal::BoundNodesTreeBuilder *Builder) const override {
+ ast_matchers::internal::BoundNodesTreeBuilder Result(*Builder);
+ for (const auto &N : Nodes.getMap())
+ Result.setBinding(N.first, N.second);
+ if (InnerMatcher.matches(Node, Finder, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ return false;
+ }
+};
+
+/// Matches nodes of type T that have at least one descendant node for which the
+/// given inner matcher matches. Will match for each descendant node that
+/// matches. Based on ForEachDescendantMatcher, but takes a dynamic matcher,
+/// instead of a static one, because it is used by RewriteRule, which carries
+/// (only top-level) dynamic matchers.
+template <typename T>
+class DynamicForEachDescendantMatcher
+ : public ast_matchers::internal::MatcherInterface<T> {
+ const DynTypedMatcher DescendantMatcher;
+
+public:
+ explicit DynamicForEachDescendantMatcher(DynTypedMatcher DescendantMatcher)
+ : DescendantMatcher(std::move(DescendantMatcher)) {}
+
+ bool matches(
+ const T &Node, ast_matchers::internal::ASTMatchFinder *Finder,
+ ast_matchers::internal::BoundNodesTreeBuilder *Builder) const override {
+ return Finder->matchesDescendantOf(
+ Node, this->DescendantMatcher, Builder,
+ ast_matchers::internal::ASTMatchFinder::BK_All);
+ }
+};
+
+template <typename T>
+ast_matchers::internal::Matcher<T>
+forEachDescendantDynamically(ast_matchers::BoundNodes Nodes,
+ DynTypedMatcher M) {
+ return ast_matchers::internal::makeMatcher(new BindingsMatcher<T>(
+ std::move(Nodes),
+ ast_matchers::internal::makeMatcher(
+ new DynamicForEachDescendantMatcher<T>(std::move(M)))));
+}
+
+class ApplyRuleCallback : public MatchFinder::MatchCallback {
+public:
+ ApplyRuleCallback(RewriteRule Rule) : Rule(std::move(Rule)) {}
+
+ template <typename T>
+ void registerMatchers(const ast_matchers::BoundNodes &Nodes,
+ MatchFinder *MF) {
+ for (auto &Matcher : transformer::detail::buildMatchers(Rule))
+ MF->addMatcher(forEachDescendantDynamically<T>(Nodes, Matcher), this);
+ }
+
+ void run(const MatchFinder::MatchResult &Result) override {
+ if (!Edits)
+ return;
+ transformer::RewriteRule::Case Case =
+ transformer::detail::findSelectedCase(Result, Rule);
+ auto Transformations = Case.Edits(Result);
+ if (!Transformations) {
+ Edits = Transformations.takeError();
+ return;
+ }
+ Edits->append(Transformations->begin(), Transformations->end());
+ }
+
+ RewriteRule Rule;
+
+ // Initialize to a non-error state.
+ Expected<SmallVector<Edit, 1>> Edits = SmallVector<Edit, 1>();
+};
+} // namespace
+
+template <typename T>
+llvm::Expected<SmallVector<clang::transformer::Edit, 1>>
+rewriteDescendantsImpl(const T &Node, RewriteRule Rule,
+ const MatchResult &Result) {
+ ApplyRuleCallback Callback(std::move(Rule));
+ MatchFinder Finder;
+ Callback.registerMatchers<T>(Result.Nodes, &Finder);
+ Finder.match(Node, *Result.Context);
+ return std::move(Callback.Edits);
+}
+
+llvm::Expected<SmallVector<clang::transformer::Edit, 1>>
+transformer::detail::rewriteDescendants(const Decl &Node, RewriteRule Rule,
+ const MatchResult &Result) {
+ return rewriteDescendantsImpl(Node, std::move(Rule), Result);
+}
+
+llvm::Expected<SmallVector<clang::transformer::Edit, 1>>
+transformer::detail::rewriteDescendants(const Stmt &Node, RewriteRule Rule,
+ const MatchResult &Result) {
+ return rewriteDescendantsImpl(Node, std::move(Rule), Result);
+}
+
+llvm::Expected<SmallVector<clang::transformer::Edit, 1>>
+transformer::detail::rewriteDescendants(const TypeLoc &Node, RewriteRule Rule,
+ const MatchResult &Result) {
+ return rewriteDescendantsImpl(Node, std::move(Rule), Result);
+}
+
+llvm::Expected<SmallVector<clang::transformer::Edit, 1>>
+transformer::detail::rewriteDescendants(const DynTypedNode &DNode,
+ RewriteRule Rule,
+ const MatchResult &Result) {
+ if (const auto *Node = DNode.get<Decl>())
+ return rewriteDescendantsImpl(*Node, std::move(Rule), Result);
+ if (const auto *Node = DNode.get<Stmt>())
+ return rewriteDescendantsImpl(*Node, std::move(Rule), Result);
+ if (const auto *Node = DNode.get<TypeLoc>())
+ return rewriteDescendantsImpl(*Node, std::move(Rule), Result);
+
+ return llvm::make_error<llvm::StringError>(
+ llvm::errc::invalid_argument,
+ "type unsupported for recursive rewriting, Kind=" +
+ DNode.getNodeKind().asStringRef());
+}
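+// For illustration: a node bound as, say, a `QualType` (rather than a `Decl`,
+// `Stmt`, or `TypeLoc`) takes the error path above, producing
+// "type unsupported for recursive rewriting, Kind=QualType".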
+
+EditGenerator transformer::rewriteDescendants(std::string NodeId,
+ RewriteRule Rule) {
+ return [NodeId = std::move(NodeId),
+ Rule = std::move(Rule)](const MatchResult &Result)
+ -> llvm::Expected<SmallVector<clang::transformer::Edit, 1>> {
+ const ast_matchers::BoundNodes::IDToNodeMap &NodesMap =
+ Result.Nodes.getMap();
+ auto It = NodesMap.find(NodeId);
+ if (It == NodesMap.end())
+ return llvm::make_error<llvm::StringError>(llvm::errc::invalid_argument,
+ "ID not bound: " + NodeId);
+ return detail::rewriteDescendants(It->second, std::move(Rule), Result);
+ };
}
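+
+// A small usage sketch (hypothetical rule, for illustration): rewrite every
+// literal `0` inside a bound function body to `NULL`:
+//
+//   auto InnerRule = makeRule(integerLiteral(equals(0)),
+//                             changeTo(cat("NULL")));
+//   auto Rule = makeRule(functionDecl(hasBody(stmt().bind("body"))),
+//                        rewriteDescendants("body", InnerRule));
+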
void transformer::addInclude(RewriteRule &Rule, StringRef Header,
- IncludeFormat Format) {
+ IncludeFormat Format) {
for (auto &Case : Rule.Cases)
- Case.AddedIncludes.emplace_back(Header.str(), Format);
+ Case.Edits = flatten(std::move(Case.Edits), addInclude(Header, Format));
}
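+
+// For illustration (assuming a `RewriteRule Rule` built elsewhere): request
+// an angled include on every match of the rule:
+//
+//   addInclude(Rule, "memory", IncludeFormat::Angled);
+//
+// The header is attached to each case as an ordinary `AddInclude` edit rather
+// than as separate per-case metadata.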
#ifndef NDEBUG
@@ -123,7 +347,7 @@ static bool hasValidKind(const DynTypedMatcher &M) {
static std::vector<DynTypedMatcher> taggedMatchers(
StringRef TagBase,
const SmallVectorImpl<std::pair<size_t, RewriteRule::Case>> &Cases,
- ast_type_traits::TraversalKind DefaultTraversalKind) {
+ TraversalKind DefaultTraversalKind) {
std::vector<DynTypedMatcher> Matchers;
Matchers.reserve(Cases.size());
for (const auto &Case : Cases) {
@@ -167,18 +391,16 @@ transformer::detail::buildMatchers(const RewriteRule &Rule) {
// Each anyOf explicitly controls the traversal kind. The anyOf itself is set
// to `TK_AsIs` to ensure no nodes are skipped, thereby deferring to the kind
// of the branches. Then, each branch is either left as is, if the kind is
- // already set, or explicitly set to `TK_IgnoreUnlessSpelledInSource`. We
- // choose this setting, because we think it is the one most friendly to
- // beginners, who are (largely) the target audience of Transformer.
+ // already set, or explicitly set to `TK_AsIs`. We choose this setting because
+ // it is the default interpretation of matchers.
std::vector<DynTypedMatcher> Matchers;
for (const auto &Bucket : Buckets) {
DynTypedMatcher M = DynTypedMatcher::constructVariadic(
DynTypedMatcher::VO_AnyOf, Bucket.first,
- taggedMatchers("Tag", Bucket.second, TK_IgnoreUnlessSpelledInSource));
+ taggedMatchers("Tag", Bucket.second, TK_AsIs));
M.setAllowBind(true);
// `tryBind` is guaranteed to succeed, because `AllowBind` was set to true.
- Matchers.push_back(
- M.tryBind(RewriteRule::RootID)->withTraversalKind(TK_AsIs));
+ Matchers.push_back(M.tryBind(RootID)->withTraversalKind(TK_AsIs));
}
return Matchers;
}
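+
+// For a rule with two cases (illustrative), this yields a single `anyOf`
+// whose branches are bound to "Tag0" and "Tag1"; `findSelectedCase` later
+// recovers the matched case from whichever tag ended up bound.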
@@ -191,7 +413,7 @@ DynTypedMatcher transformer::detail::buildMatcher(const RewriteRule &Rule) {
SourceLocation transformer::detail::getRuleMatchLoc(const MatchResult &Result) {
auto &NodesMap = Result.Nodes.getMap();
- auto Root = NodesMap.find(RewriteRule::RootID);
+ auto Root = NodesMap.find(RootID);
assert(Root != NodesMap.end() && "Transformation failed: missing root node.");
llvm::Optional<CharSourceRange> RootRange = tooling::getRangeForEdit(
CharSourceRange::getTokenRange(Root->second.getSourceRange()),
@@ -221,8 +443,4 @@ transformer::detail::findSelectedCase(const MatchResult &Result,
llvm_unreachable("No tag found for this rule.");
}
-constexpr llvm::StringLiteral RewriteRule::RootID;
-
-TextGenerator tooling::text(std::string M) {
- return std::make_shared<SimpleTextGenerator>(std::move(M));
-}
+const llvm::StringRef RewriteRule::RootID = ::clang::transformer::RootID;
diff --git a/clang/lib/Tooling/Transformer/Stencil.cpp b/clang/lib/Tooling/Transformer/Stencil.cpp
index 2670bf7adabf..d46087e4b04b 100644
--- a/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -63,6 +63,7 @@ enum class UnaryNodeOperator {
MaybeDeref,
AddressOf,
MaybeAddressOf,
+ Describe,
};
// Generic container for stencil operations with a (single) node-id argument.
@@ -133,6 +134,9 @@ std::string toStringData(const UnaryOperationData &Data) {
case UnaryNodeOperator::MaybeAddressOf:
OpName = "maybeAddressOf";
break;
+ case UnaryNodeOperator::Describe:
+ OpName = "describe";
+ break;
}
return (OpName + "(\"" + Data.Id + "\")").str();
}
@@ -174,11 +178,11 @@ Error evalData(const RawTextData &Data, const MatchFinder::MatchResult &,
return Error::success();
}
-Error evalData(const DebugPrintNodeData &Data,
- const MatchFinder::MatchResult &Match, std::string *Result) {
+static Error printNode(StringRef Id, const MatchFinder::MatchResult &Match,
+ std::string *Result) {
std::string Output;
llvm::raw_string_ostream Os(Output);
- auto NodeOrErr = getNode(Match.Nodes, Data.Id);
+ auto NodeOrErr = getNode(Match.Nodes, Id);
if (auto Err = NodeOrErr.takeError())
return Err;
NodeOrErr->print(Os, PrintingPolicy(Match.Context->getLangOpts()));
@@ -186,8 +190,36 @@ Error evalData(const DebugPrintNodeData &Data,
return Error::success();
}
+Error evalData(const DebugPrintNodeData &Data,
+ const MatchFinder::MatchResult &Match, std::string *Result) {
+ return printNode(Data.Id, Match, Result);
+}
+
+// FIXME: Consider memoizing this function using the `ASTContext`.
+static bool isSmartPointerType(QualType Ty, ASTContext &Context) {
+ using namespace ::clang::ast_matchers;
+
+  // Optimization: hard-code common smart-pointer types. This hard-coding can
+  // be removed once we start caching the results of this function.
+ auto KnownSmartPointer =
+ cxxRecordDecl(hasAnyName("::std::unique_ptr", "::std::shared_ptr"));
+ const auto QuacksLikeASmartPointer = cxxRecordDecl(
+ hasMethod(cxxMethodDecl(hasOverloadedOperatorName("->"),
+ returns(qualType(pointsTo(type()))))),
+ hasMethod(cxxMethodDecl(hasOverloadedOperatorName("*"),
+ returns(qualType(references(type()))))));
+ const auto SmartPointer = qualType(hasDeclaration(
+ cxxRecordDecl(anyOf(KnownSmartPointer, QuacksLikeASmartPointer))));
+ return match(SmartPointer, Ty, Context).size() > 0;
+}
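+
+// For illustration, a minimal (hypothetical) type that the structural check
+// above classifies as a smart pointer:
+//
+//   struct MyPtr {
+//     int *operator->() const;  // returns a pointer,
+//     int &operator*() const;   // and a reference: it quacks like one.
+//   };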
+
Error evalData(const UnaryOperationData &Data,
const MatchFinder::MatchResult &Match, std::string *Result) {
+  // The `Describe` operation can be applied to any node, not just
+  // expressions, so it is handled separately here.
+ if (Data.Op == UnaryNodeOperator::Describe)
+ return printNode(Data.Id, Match, Result);
+
const auto *E = Match.Nodes.getNodeAs<Expr>(Data.Id);
if (E == nullptr)
return llvm::make_error<StringError>(
@@ -201,22 +233,44 @@ Error evalData(const UnaryOperationData &Data,
Source = tooling::buildDereference(*E, *Match.Context);
break;
case UnaryNodeOperator::MaybeDeref:
- if (!E->getType()->isAnyPointerType()) {
- *Result += tooling::getText(*E, *Match.Context);
- return Error::success();
+ if (E->getType()->isAnyPointerType() ||
+ isSmartPointerType(E->getType(), *Match.Context)) {
+ // Strip off any operator->. This can only occur inside an actual arrow
+ // member access, so we treat it as equivalent to an actual object
+ // expression.
+ if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
+ if (OpCall->getOperator() == clang::OO_Arrow &&
+ OpCall->getNumArgs() == 1) {
+ E = OpCall->getArg(0);
+ }
+ }
+ Source = tooling::buildDereference(*E, *Match.Context);
+ break;
}
- Source = tooling::buildDereference(*E, *Match.Context);
- break;
+ *Result += tooling::getText(*E, *Match.Context);
+ return Error::success();
case UnaryNodeOperator::AddressOf:
Source = tooling::buildAddressOf(*E, *Match.Context);
break;
case UnaryNodeOperator::MaybeAddressOf:
- if (E->getType()->isAnyPointerType()) {
+ if (E->getType()->isAnyPointerType() ||
+ isSmartPointerType(E->getType(), *Match.Context)) {
+ // Strip off any operator->. This can only occur inside an actual arrow
+ // member access, so we treat it as equivalent to an actual object
+ // expression.
+ if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
+ if (OpCall->getOperator() == clang::OO_Arrow &&
+ OpCall->getNumArgs() == 1) {
+ E = OpCall->getArg(0);
+ }
+ }
*Result += tooling::getText(*E, *Match.Context);
return Error::success();
}
Source = tooling::buildAddressOf(*E, *Match.Context);
break;
+ case UnaryNodeOperator::Describe:
+ llvm_unreachable("This case is handled at the start of the function");
}
if (!Source)
return llvm::make_error<StringError>(
@@ -359,6 +413,11 @@ Stencil transformer::maybeAddressOf(llvm::StringRef ExprId) {
UnaryNodeOperator::MaybeAddressOf, std::string(ExprId));
}
+Stencil transformer::describe(StringRef Id) {
+ return std::make_shared<StencilImpl<UnaryOperationData>>(
+ UnaryNodeOperator::Describe, std::string(Id));
+}
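+
+// A small usage sketch (the binding "fn" is illustrative): evaluate a
+// `describe` stencil against a match to render the bound node, e.g. for
+// debug output:
+//
+//   Stencil S = describe("fn");
+//   // If "fn" is bound to the declaration `int foo(int);`, evaluating S
+//   // yields its pretty-printed form rather than its source text.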
+
Stencil transformer::access(StringRef BaseId, Stencil Member) {
return std::make_shared<StencilImpl<AccessData>>(BaseId, std::move(Member));
}
diff --git a/clang/lib/Tooling/Transformer/Transformer.cpp b/clang/lib/Tooling/Transformer/Transformer.cpp
index e8fc00c4e953..7a4d8b45f189 100644
--- a/clang/lib/Tooling/Transformer/Transformer.cpp
+++ b/clang/lib/Tooling/Transformer/Transformer.cpp
@@ -38,13 +38,8 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
return;
}
- if (Transformations->empty()) {
- // No rewrite applied (but no error encountered either).
- transformer::detail::getRuleMatchLoc(Result).print(
- llvm::errs() << "note: skipping match at loc ", *Result.SourceManager);
- llvm::errs() << "\n";
+ if (Transformations->empty())
return;
- }
// Group the transformations, by file, into AtomicChanges, each anchored by
// the location of the first change in that file.
@@ -56,29 +51,20 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
T.Range.getBegin(), T.Metadata))
.first;
auto &AC = Iter->second;
- if (auto Err = AC.replace(*Result.SourceManager, T.Range, T.Replacement)) {
- Consumer(std::move(Err));
- return;
- }
- }
-
- for (auto &IDChangePair : ChangesByFileID) {
- auto &AC = IDChangePair.second;
- // FIXME: this will add includes to *all* changed files, which may not be
- // the intent. We should upgrade the representation to allow associating
- // headers with specific edits.
- for (const auto &I : Case.AddedIncludes) {
- auto &Header = I.first;
- switch (I.second) {
- case transformer::IncludeFormat::Quoted:
- AC.addHeader(Header);
- break;
- case transformer::IncludeFormat::Angled:
- AC.addHeader((llvm::Twine("<") + Header + ">").str());
- break;
+ switch (T.Kind) {
+ case transformer::EditKind::Range:
+ if (auto Err =
+ AC.replace(*Result.SourceManager, T.Range, T.Replacement)) {
+ Consumer(std::move(Err));
+ return;
}
+ break;
+ case transformer::EditKind::AddInclude:
+ AC.addHeader(T.Replacement);
+ break;
}
-
- Consumer(std::move(AC));
}
+
+ for (auto &IDChangePair : ChangesByFileID)
+ Consumer(std::move(IDChangePair.second));
}
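+
+// Note: with this representation, an `EditKind::AddInclude` edit is expected
+// to carry the header in `T.Replacement` already spelled for its include
+// style (a bare path for quoted includes, `<...>` for angled), so it can be
+// handed to `AtomicChange::addHeader` as-is.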